code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
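# For contrast with the comments above, here is a breadth-first variant of the
# same traversal. This is an illustrative sketch added for comparison, not part
# of the original snippet.
from collections import deque


def breadth_first_search(graph: dict, start: str) -> set[str]:
    # BFS pops from the front of a queue instead of the back of a stack
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)  # mark before enqueueing to avoid duplicates
                queue.append(adj)
    return explored


# breadth_first_search(G, "A") visits the same set of vertices as
# depth_first_search(G, "A"), only in a different order.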
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
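# Side note: the `_LazyModule` registered above defers the heavy submodule
# imports until an attribute is first accessed. A minimal sketch of the same
# idea using PEP 562's module-level `__getattr__` (an illustration of the
# mechanism, not the actual `_LazyModule` implementation):
#
#     import importlib
#
#     _attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
#
#     def __getattr__(name):
#         # import the submodule only when one of its symbols is first requested
#         if name in _attr_to_module:
#             module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")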
| 280 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 280 |
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
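# Worked example (added for illustration): each pass moves the current minimum
# to `start` and the current maximum to `end`.
# merge_sort([5, 2, 9, 1, 7]) -> [1, 2, 5, 7, 9]
# pass 1: start=[1], end=[9]; pass 2: start=[1, 2], end=[9, 7]; leftover: [5]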
| 280 | 1 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
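# Usage example (added for illustration): `sort` runs quicksort until the depth
# limit 2*ceil(log2(n)) is exhausted (then falls back to heap sort) and hands
# slices of at most 16 elements to insertion sort.
# sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
# -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]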
| 280 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
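# Illustrative calls (added; the original block only runs doctests):
# bubble_sort([0, 5, 2, 3, 2]) -> [0, 2, 2, 3, 5]
# bubble_sort([]) -> []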
| 280 | 1 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues,
        time_slices,
        queue,
        current_time,
    ):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue):
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice):
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
            \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
            \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
            \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
            {mlfq.calculate_sequence_of_finish_queue()}"
    )
| 280 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
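# Worked example (added for illustration):
# maximum_non_adjacent_sum([2, 7, 9, 3, 1]) -> 12, picking 2 + 9 + 1.
# (max_including, max_excluding) evolves as
# (2, 0) -> (7, 2) -> (11, 7) -> (10, 11) -> (12, 11), and max(12, 11) = 12.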
| 280 | 1 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 280 |
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
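# Worked example (added for illustration): twos_complement(-5) -> '0b1011'.
# bin(-5)[3:] is '101' (3 bits), abs(-5) - (1 << 3) = -3 and bin(-3)[3:] is '11',
# so the sign bit plus zero padding yields '1' + '0' + '11' = '1011'.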
| 280 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 280 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__A : List[str] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a , a ):
AutoTokenizer.from_pretrained(a ).save_pretrained(a ) # purely for convenience
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a ).eval()
else:
assert isinstance(a , a ), F"""teacher must be a model or string got type {type(a )}"""
__A : int = teacher.config.to_diff_dict()
try:
__A , __A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__A : str = teacher_e
if d is None:
__A : List[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__A , __A : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__A , __A : Optional[int] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__A : int = teacher_e
if d is None:
__A : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a )
# Copy weights
__A : Dict = teacher.config_class(**a )
__A : int = AutoModelForSeqaSeqLM.from_config(a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__A : Any = student.load_state_dict(teacher.state_dict() , strict=a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__A , __A : Optional[int] = list(range(a ) ), list(range(a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
if d_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
try:
if hasattr(
a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a )
copy_layers(teacher.decoder.block , student.decoder.block , a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__A : Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
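# Quick sanity checks of the hardcoded tables above (added for illustration):
# pick_layers_to_copy(n_student=3, n_teacher=12) -> [0, 6, 11]
# pick_layers_to_copy(n_student=4, n_teacher=16) -> [0, 5, 10, 15]
# get_layers_to_supervise(n_student=3, n_teacher=12) -> [3, 7, 11]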
| 280 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 280 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
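# Worked example (added for illustration): the greedy scan always takes the
# largest denomination that still fits.
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]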
| 280 | 1 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 280 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 280 | 1 |
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 280 |
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
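# The script expects the biencoder NQ training-data layout: a JSON list of
# records, each with a question and its positive contexts. Illustrative record
# (field names taken from the code above, values invented):
# [
#   {
#     "question": "who wrote the declaration of independence",
#     "positive_ctxs": [{"title": "Declaration of Independence", "text": "..."}]
#   }
# ]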
| 280 | 1 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 280 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
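# Usage example (added for illustration): cells equal to 1 are passable and
# each step costs 1.
# grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
# dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
# -> (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])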
| 280 | 1 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 280 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def _SCREAMING_SNAKE_CASE ( a , a , a=8 ) -> Tuple:
__A : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__A : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _SCREAMING_SNAKE_CASE ( a , a=5_12 , a=5_12 ) -> int:
__A : Optional[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
__A : Union[str, Any] = np.array(pil_image.convert('RGB' ) )
__A : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
__A : int = np.transpose(a , [2, 0, 1] )
__A : Tuple = torch.from_numpy(a ).unsqueeze(0 )
return image
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A , _A , ):
super().__init__()
self.register_modules(
unet=_A , scheduler=_A , movq=_A , )
__A : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self , _A , _A , _A ):
# get the original timestep using init_timestep
__A : Optional[int] = min(int(num_inference_steps * strength ) , _A )
__A : Dict = max(num_inference_steps - init_timestep , 0 )
__A : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
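    # Worked example (illustrative): with num_inference_steps=100 and
    # strength=0.2, init_timestep = 20 and t_start = 80, so only the final 20
    # scheduler timesteps are run -- higher strength means more denoising.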
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A=None ):
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}""" )
__A : Union[str, Any] = image.to(device=_A , dtype=_A )
__A : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__A : int = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(_A , _A ):
__A : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
__A : str = torch.cat(_A , dim=0 )
else:
__A : List[str] = self.movq.encode(_A ).latent_dist.sample(_A )
__A : Tuple = self.movq.config.scaling_factor * init_latents
__A : Optional[int] = torch.cat([init_latents] , dim=0 )
__A : Union[str, Any] = init_latents.shape
__A : List[str] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
__A : Optional[Any] = self.scheduler.add_noise(_A , _A , _A )
__A : Optional[int] = init_latents
return latents
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__A : Optional[int] = torch.device(F"""cuda:{gpu_id}""" )
__A : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__A : List[Any] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__A : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
__A , __A : Optional[int] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
__A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
def __call__( self , _A , _A , _A , _A = 512 , _A = 512 , _A = 100 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ):
__A : List[Any] = self._execution_device
__A : Optional[Any] = guidance_scale > 1.0
if isinstance(_A , _A ):
__A : Optional[Any] = torch.cat(_A , dim=0 )
__A : Tuple = image_embeds.shape[0]
if isinstance(_A , _A ):
__A : List[Any] = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
__A : Union[str, Any] = image_embeds.repeat_interleave(_A , dim=0 )
__A : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
__A : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
__A : List[Any] = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
__A : Dict = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
__A : Any = image.to(dtype=image_embeds.dtype , device=_A )
__A : Tuple = self.movq.encode(_A )['latents']
__A : int = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
__A , __A : int = self.get_timesteps(_A , _A , _A )
__A : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
__A , __A : Any = downscale_height_and_width(_A , _A , self.movq_scale_factor )
__A : Tuple = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__A : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A : Dict = {'image_embeds': image_embeds}
__A : List[str] = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
__A , __A : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__A , __A : Optional[Any] = noise_pred.chunk(2 )
__A , __A : List[str] = variance_pred.chunk(2 )
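                # classifier-free guidance: push the prediction away from the
                # unconditional branch toward the image-conditioned branch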
__A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__A : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__A , __A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__A : List[str] = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
__A : List[Any] = self.movq.decode(_A , force_not_quantize=_A )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__A : List[str] = image * 0.5 + 0.5
__A : List[str] = image.clamp(0 , 1 )
__A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__A : Any = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 280 | 1 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def _SCREAMING_SNAKE_CASE ( a , a , a , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _SCREAMING_SNAKE_CASE ( a , a , a , ) -> float:
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return compound_interest(
a , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
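# Worked examples (illustrative): simple interest on a 1000-unit principal at a
# 0.05% daily rate for 60 days is 1000 * 0.0005 * 60 = 30.0; compound interest
# at 5% per period over 3 periods is 1000 * (1.05**3 - 1) = 157.625. The APR
# helper above compounds the daily rate over 365 * number_of_years periods.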
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[Any]:
__A : Any = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__A : str = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__A : Optional[int] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=a , output_all_encodings=a , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , a ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__A : Union[str, Any] = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__A : Any = os.path.join(get_home_dir() , 'models' )
__A : List[Any] = _load_vocab(a , a , a , cls=a )
__A : Dict = nlp.model.BERTModel(
a , len(a ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=a , use_token_type_embed=a , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=a , use_decoder=a , )
original_bort.load_parameters(a , cast_dtype=a , ignore_extra=a )
__A : Union[str, Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
__A : Any = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(a ),
}
__A : int = BertConfig.from_dict(a )
__A : Union[str, Any] = BertForMaskedLM(a )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(a ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(a , a ):
__A : Tuple = hf_param.shape
__A : str = to_torch(params[gluon_param] )
__A : Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
__A : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__A : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__A : Tuple = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__A : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__A : BertSelfAttention = layer.attention.self
__A : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
__A : Optional[Any] = check_and_map_params(
self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
__A : BertSelfOutput = layer.attention.output
__A : Tuple = check_and_map_params(
self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
__A : int = check_and_map_params(
self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
__A : List[Any] = check_and_map_params(
self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
__A : str = check_and_map_params(
self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
__A : BertIntermediate = layer.intermediate
__A : int = check_and_map_params(
intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
__A : List[Any] = check_and_map_params(
intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
__A : BertOutput = layer.output
__A : List[Any] = check_and_map_params(
bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
__A : Dict = check_and_map_params(
bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
__A : Optional[int] = check_and_map_params(
bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
__A : Dict = check_and_map_params(
bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__A : Any = RobertaTokenizer.from_pretrained('roberta-base' )
__A : List[str] = tokenizer.encode_plus(a )['input_ids']
# Get gluon output
__A : List[str] = mx.nd.array([input_ids] )
__A : Union[str, Any] = original_bort(inputs=a , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(a )
__A : Optional[Any] = BertModel.from_pretrained(a )
hf_bort_model.eval()
__A : Tuple = tokenizer.encode_plus(a , return_tensors='pt' )
__A : Any = hf_bort_model(**a )[0]
__A : Union[str, Any] = output_gluon[0].asnumpy()
__A : Tuple = output_hf[0].detach().numpy()
__A : int = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__A : int = np.allclose(a , a , atol=1e-3 )
if success:
        print('✔️ Both models output the same tensors' )
else:
        print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:' , a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase : Dict = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
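# Example invocation (illustrative; the script name and paths are placeholders):
# python convert_bort_original_checkpoint_to_pytorch.py \
#     --bort_checkpoint_path /path/to/bort.params \
#     --pytorch_dump_folder_path ./bort-pytorch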
| 280 | 1 |
class _A:
"""simple docstring"""
def __init__( self , _A ):
__A : Union[str, Any] = val
__A : List[str] = None
__A : List[Any] = None
def UpperCAmelCase_ ( self , _A ):
if self.val:
if val < self.val:
if self.left is None:
__A : Any = Node(_A )
else:
self.left.insert(_A )
elif val > self.val:
if self.right is None:
__A : List[Any] = Node(_A )
else:
self.right.insert(_A )
else:
__A : Any = val
def _SCREAMING_SNAKE_CASE ( a , a ) -> Dict:
# Recursive traversal
if root:
inorder(root.left , a )
res.append(root.val )
inorder(root.right , a )
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
# Build BST
if len(a ) == 0:
return arr
__A : Dict = Node(arr[0] )
for i in range(1 , len(a ) ):
root.insert(arr[i] )
# Traverse BST in order.
__A : Union[str, Any] = []
inorder(a , a )
return res
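# Worked example (illustrative): the call below inserts 10, 1, 3, 2, 9, 14, 13
# into a binary search tree; the in-order traversal then yields the sorted
# list [1, 2, 3, 9, 10, 13, 14].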
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 280 |
import colorsys
from PIL import Image # type: ignore
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
__A : List[str] = x
__A : str = y
for step in range(a ): # noqa: B007
__A : Union[str, Any] = a * a - b * b + x
__A : Optional[int] = 2 * a * b + y
__A : List[str] = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
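# Worked examples (illustrative): the origin (0, 0) never escapes, so the loop
# runs to completion and the function returns 1.0; the point (1, 1) escapes on
# its second iteration (|z|**2 = 1 + 9 > 4), giving 1 / (max_step - 1).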
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(a , 1 , 1 ) )
def _SCREAMING_SNAKE_CASE ( a = 8_00 , a = 6_00 , a = -0.6 , a = 0 , a = 3.2 , a = 50 , a = True , ) -> Image.Image:
__A : str = Image.new('RGB' , (image_width, image_height) )
__A : Dict = img.load()
# loop through the image-coordinates
for image_x in range(a ):
for image_y in range(a ):
# determine the figure-coordinates based on the image-coordinates
__A : Dict = figure_width / image_width * image_height
__A : Union[str, Any] = figure_center_x + (image_x / image_width - 0.5) * figure_width
__A : Optional[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
__A : Union[str, Any] = get_distance(a , a , a )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__A : Optional[Any] = get_color_coded_rgb(a )
else:
__A : Dict = get_black_and_white_rgb(a )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
UpperCAmelCase : str = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 280 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=2 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=False , _A=True , _A="None" , _A=3 , _A=4 , _A=None , ):
__A : str = parent
__A : str = batch_size
__A : List[Any] = seq_length
__A : List[Any] = is_training
__A : Any = use_input_mask
__A : Tuple = use_token_type_ids
__A : int = use_labels
__A : Union[str, Any] = vocab_size
__A : Dict = hidden_size
__A : Union[str, Any] = num_hidden_layers
__A : List[Any] = num_attention_heads
__A : int = intermediate_size
__A : Union[str, Any] = hidden_act
__A : int = hidden_dropout_prob
__A : Any = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_size
__A : int = type_sequence_label_size
__A : int = initializer_range
__A : Union[str, Any] = num_labels
__A : Tuple = num_choices
__A : Optional[Any] = relative_attention
__A : Tuple = position_biased_input
__A : List[Any] = pos_att_type
__A : Optional[Any] = scope
def UpperCAmelCase_ ( self ):
__A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : List[str] = None
if self.use_input_mask:
__A : str = random_attention_mask([self.batch_size, self.seq_length] )
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Dict = None
__A : Optional[int] = None
__A : Optional[Any] = None
if self.use_labels:
__A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : Tuple = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : List[Any] = TFDebertaVaModel(config=_A )
__A : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Optional[int] = [input_ids, input_mask]
__A : Dict = model(_A )
__A : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : int = TFDebertaVaForMaskedLM(config=_A )
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : Any = self.num_labels
__A : str = TFDebertaVaForSequenceClassification(config=_A )
__A : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : str = self.num_labels
__A : Tuple = TFDebertaVaForTokenClassification(config=_A )
__A : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : Optional[Any] = TFDebertaVaForQuestionAnswering(config=_A )
__A : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Optional[Any] = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.prepare_config_and_inputs()
        (__A , __A , __A , __A , __A , __A , __A) : Optional[int] = config_and_inputs
__A : str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _A( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase : Optional[int] = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase : List[Any] = False
UpperCamelCase : List[Any] = False
def UpperCAmelCase_ ( self ):
__A : Tuple = TFDebertaVaModelTester(self )
__A : int = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def UpperCAmelCase_ ( self ):
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def UpperCAmelCase_ ( self ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def UpperCAmelCase_ ( self ):
__A : Dict = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(_A )
@require_tf
class _A( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def UpperCAmelCase_ ( self ):
pass
@slow
def UpperCAmelCase_ ( self ):
__A : int = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
__A : Optional[int] = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__A : Optional[Any] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__A : Optional[Any] = model(_A , attention_mask=_A )[0]
__A : List[str] = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase : Any = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = ['''GLPNFeatureExtractor''']
UpperCAmelCase : Optional[int] = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase : Any = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Any = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = '''blip_2_vision_model'''
def __init__( self , _A=1408 , _A=6144 , _A=39 , _A=16 , _A=224 , _A=14 , _A="gelu" , _A=0.0_0_0_0_1 , _A=0.0 , _A=1e-1_0 , _A=True , **_A , ):
super().__init__(**_A )
__A : Dict = hidden_size
__A : Dict = intermediate_size
__A : List[str] = num_hidden_layers
__A : List[Any] = num_attention_heads
__A : List[Any] = patch_size
__A : List[str] = image_size
__A : Optional[Any] = initializer_range
__A : Optional[Any] = attention_dropout
__A : List[str] = layer_norm_eps
__A : Dict = hidden_act
__A : List[str] = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
cls._set_token_in_kwargs(_A )
__A , __A : Any = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
__A : Union[str, Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = '''blip_2_qformer'''
def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=0.0_2 , _A=1e-1_2 , _A=0 , _A="absolute" , _A=2 , _A=1408 , **_A , ):
super().__init__(pad_token_id=_A , **_A )
__A : Any = vocab_size
__A : str = hidden_size
__A : Optional[Any] = num_hidden_layers
__A : Any = num_attention_heads
__A : Union[str, Any] = hidden_act
__A : Optional[Any] = intermediate_size
__A : List[str] = hidden_dropout_prob
__A : Tuple = attention_probs_dropout_prob
__A : Optional[Any] = max_position_embeddings
__A : str = initializer_range
__A : str = layer_norm_eps
__A : Optional[int] = position_embedding_type
__A : str = cross_attention_frequency
__A : Optional[Any] = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
cls._set_token_in_kwargs(_A )
__A , __A : Optional[int] = cls.get_config_dict(_A , **_A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
__A : str = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = '''blip-2'''
UpperCamelCase : str = True
def __init__( self , _A=None , _A=None , _A=None , _A=32 , **_A ):
super().__init__(**_A )
if vision_config is None:
__A : Any = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
__A : str = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
__A : Any = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__A : Dict = BlipaVisionConfig(**_A )
__A : Dict = BlipaQFormerConfig(**_A )
__A : Dict = text_config['model_type'] if 'model_type' in text_config else 'opt'
__A : Union[str, Any] = CONFIG_MAPPING[text_model_type](**_A )
__A : Dict = self.text_config.tie_word_embeddings
__A : str = self.text_config.is_encoder_decoder
__A : Dict = num_query_tokens
__A : Any = self.vision_config.hidden_size
__A : List[str] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__A : Dict = 1.0
__A : str = 0.0_2
@classmethod
def UpperCAmelCase_ ( cls , _A , _A , _A , **_A , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_A , )
def UpperCAmelCase_ ( self ):
__A : Any = copy.deepcopy(self.__dict__ )
__A : Any = self.vision_config.to_dict()
__A : Optional[Any] = self.qformer_config.to_dict()
__A : Dict = self.text_config.to_dict()
__A : Optional[int] = self.__class__.model_type
return output
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> bool:
return str(a ) == str(a )[::-1]
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return int(a ) + int(str(a )[::-1] )
def _SCREAMING_SNAKE_CASE ( a = 1_00_00 ) -> int:
__A : int = []
for num in range(1 , a ):
__A : List[str] = 0
__A : List[Any] = num
while iterations < 50:
__A : str = sum_reverse(a )
iterations += 1
if is_palindrome(a ):
break
else:
lychrel_nums.append(a )
return len(a )
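# Worked example (illustrative): 47 + 74 = 121 is a palindrome after a single
# reverse-and-add step, so 47 is not a Lychrel candidate. A number that is
# still non-palindromic after all 50 iterations exhausts the while-loop without
# a `break`, so control falls through to the loop's `else:` branch and the
# number is appended to the Lychrel list.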
if __name__ == "__main__":
print(F"""{solution() = }""")
| 280 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
UpperCAmelCase : int = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[Any]:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
__A : int = list(s_dict.keys() )
for key in keys:
__A : Union[str, Any] = r'.*/layers_(\d+)'
__A : Dict = key
if re.match(a , a ):
__A : List[str] = re.sub(r'layers_(\d+)' , r'block/\1/layer' , a )
__A : List[Any] = r'(encoder|decoder)\/'
if re.match(a , a ):
__A : Tuple = re.match(a , a ).groups()
if groups[0] == "encoder":
__A : Union[str, Any] = re.sub(r'/mlp/' , r'/1/mlp/' , a )
__A : Any = re.sub(r'/pre_mlp_layer_norm/' , r'/1/layer_norm/' , a )
elif groups[0] == "decoder":
__A : Optional[int] = re.sub(r'/mlp/' , r'/2/mlp/' , a )
__A : Optional[int] = re.sub(r'/pre_mlp_layer_norm/' , r'/2/layer_norm/' , a )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__A : str = new_key.replace(a , a )
print(F"""{key} -> {new_key}""" )
__A : Dict = s_dict.pop(a )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__A : Any = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__A : List[Any] = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
__A : Optional[int] = s_dict[key].shape[0]
__A : Tuple = s_dict[key]
for idx in range(a ):
                __A : str = expert_weights[idx]
            # reconstructed print: the scrape replaced the original nested
            # f-string with the literal placeholder "nested fstring"
            print(F"""{key} -> {key.replace("expert/" , F'experts.expert_{idx}/' )}""" )
s_dict.pop(a )
return s_dict
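# Worked example (illustrative): a T5X key such as
# "encoder/layers_0/attention/key/kernel" first becomes
# "encoder/block/0/layer/attention/key/kernel" via the layers_(\d+) rewrite,
# and the mapping table then yields
# "encoder/block/0/layer/0/SelfAttention/k/kernel".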
UpperCAmelCase : Tuple = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[Any]:
# Convert a google style config to the hugging face fromat
import regex as re
with open(a , 'r' ) as f:
__A : List[str] = f.read()
__A : int = re.findall(r'(.*) = ([0-9.]*)' , a )
__A : Optional[Any] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__A : str = float(a ) if '.' in value else int(a )
__A : Any = re.findall(r'(.*activations) = \(\'(.*)\',\)' , a )[0]
__A : List[Any] = str(activation[1] )
__A : List[str] = num_experts
__A : Union[str, Any] = SwitchTransformersConfig(**a )
return config
def _SCREAMING_SNAKE_CASE ( a , a , a=None , a="./" , a=8 ) -> List[Any]:
# Initialise PyTorch model
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
__A : List[Any] = checkpoints.load_tax_checkpoint(a )
if gin_file is not None:
__A : str = convert_gin_to_config(a , a )
else:
__A : int = SwitchTransformersConfig.from_pretrained(a )
__A : Union[str, Any] = SwitchTransformersForConditionalGeneration(a )
__A : Optional[Any] = flax_params['target']
__A : Optional[int] = flatten_dict(a , sep='/' )
__A : Union[str, Any] = rename_keys(a )
__A : Any = unflatten_dict(a , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(a , a )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
UpperCAmelCase : Optional[int] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 280 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _A:
"""simple docstring"""
def __init__( self , _A = None ):
if components is None:
__A : int = []
__A : Tuple = list(_A )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(_A , self.__components ) ) + ")"
def __add__( self , _A ):
__A : Optional[int] = len(self )
if size == len(_A ):
__A : Any = [self.__components[i] + other.component(_A ) for i in range(_A )]
return Vector(_A )
else:
raise Exception('must have the same size' )
def __sub__( self , _A ):
__A : Tuple = len(self )
if size == len(_A ):
__A : Union[str, Any] = [self.__components[i] - other.component(_A ) for i in range(_A )]
return Vector(_A )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , (float, int) ):
__A : str = [c * other for c in self.__components]
return Vector(_A )
elif isinstance(_A , _A ) and len(self ) == len(_A ):
__A : Union[str, Any] = len(self )
__A : Dict = [self.__components[i] * other.component(_A ) for i in range(_A )]
return sum(_A )
else: # error case
raise Exception('invalid operand!' )
def UpperCAmelCase_ ( self ):
return Vector(self.__components )
def UpperCAmelCase_ ( self , _A ):
if isinstance(_A , _A ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCAmelCase_ ( self , _A , _A ):
assert -len(self.__components ) <= pos < len(self.__components )
__A : Optional[int] = value
def UpperCAmelCase_ ( self ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__A : Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(_A ) )
def UpperCAmelCase_ ( self , _A , _A = False ):
__A : Optional[Any] = self * other
__A : Optional[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _SCREAMING_SNAKE_CASE ( a ) -> Vector:
assert isinstance(a , a )
return Vector([0] * dimension )
def _SCREAMING_SNAKE_CASE ( a , a ) -> Vector:
assert isinstance(a , a ) and (isinstance(a , a ))
__A : Optional[Any] = [0] * dimension
__A : Tuple = 1
return Vector(a )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
assert (
isinstance(a , a )
and isinstance(a , a )
and (isinstance(a , (int, float) ))
)
return x * scalar + y
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
random.seed(a )
__A : str = [random.randint(a , a ) for _ in range(a )]
return Vector(a )
class _A:
"""simple docstring"""
def __init__( self , _A , _A , _A ):
__A : Optional[Any] = matrix
__A : Dict = w
__A : Optional[int] = h
def __str__( self ):
__A : Tuple = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Optional[Any] = []
for i in range(self.__height ):
__A : Optional[Any] = [
self.__matrix[i][j] + other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Tuple = []
for i in range(self.__height ):
__A : str = [
self.__matrix[i][j] - other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , _A ): # matrix-vector
if len(_A ) == self.__width:
__A : List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
__A : List[str] = [
self.__matrix[i][j] * other.component(_A )
for j in range(self.__width )
]
ans.change_component(_A , sum(_A ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_A , (int, float) ): # matrix-scalar
__A : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_A , self.__width , self.__height )
return None
def UpperCAmelCase_ ( self ):
return self.__height
def UpperCAmelCase_ ( self ):
return self.__width
def UpperCAmelCase_ ( self , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
__A : int = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__A : List[str] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_A ) ):
__A : Optional[int] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_A , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_A , _A )
else:
raise Exception('Indices out of bounds' )
def UpperCAmelCase_ ( self ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__A : List[str] = [
self.__matrix[0][y] * self.cofactor(0 , _A ) for y in range(self.__width )
]
return sum(_A )
def _SCREAMING_SNAKE_CASE ( a ) -> Matrix:
__A : list[list[float]] = [[0] * n for _ in range(a )]
return Matrix(a , a , a )
def _SCREAMING_SNAKE_CASE ( a , a , a , a ) -> Matrix:
random.seed(a )
__A : list[list[float]] = [
[random.randint(a , a ) for _ in range(a )] for _ in range(a )
]
return Matrix(a , a , a )
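# A minimal self-check sketch (illustrative). Note that the dataset-style
# renaming gives every method the same name `UpperCAmelCase_`, so within each
# class body only the last definition survives; for the matrix class (the
# second `class _A` above) that surviving method is the determinant.
if __name__ == "__main__":
    _demo = _A([[1.0, 2.0], [3.0, 4.0]], 2, 2)  # `_A` resolves to the matrix class
    assert _demo.UpperCAmelCase_() == 1.0 * 4.0 - 2.0 * 3.0  # determinant == -2.0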
| 280 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = '''blenderbot-small'''
UpperCamelCase : Any = ['''past_key_values''']
UpperCamelCase : Optional[int] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _A=50265 , _A=512 , _A=8 , _A=2048 , _A=16 , _A=8 , _A=2048 , _A=16 , _A=0.0 , _A=0.0 , _A=True , _A=True , _A="gelu" , _A=512 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1 , _A=False , _A=0 , _A=1 , _A=2 , _A=2 , **_A , ):
__A : List[str] = vocab_size
__A : Tuple = max_position_embeddings
__A : Dict = d_model
__A : Any = encoder_ffn_dim
__A : str = encoder_layers
__A : Union[str, Any] = encoder_attention_heads
__A : Tuple = decoder_ffn_dim
__A : Optional[int] = decoder_layers
__A : str = decoder_attention_heads
__A : Union[str, Any] = dropout
__A : Any = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Optional[int] = activation_function
__A : Any = init_std
__A : Union[str, Any] = encoder_layerdrop
__A : str = decoder_layerdrop
__A : List[Any] = use_cache
__A : Optional[int] = encoder_layers
__A : str = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , )
class _A( snake_case__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__A : str = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__A : Any = {0: 'batch'}
__A : List[str] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__A : Tuple = {0: 'batch', 1: 'decoder_sequence'}
__A : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__A : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__A , __A : Union[str, Any] = self.num_layers
for i in range(_A ):
__A : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
__A : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
__A : Any = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCAmelCase_ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__A : Tuple = super().outputs
else:
__A : List[Any] = super(_A , self ).outputs
if self.use_past:
__A , __A : List[str] = self.num_layers
for i in range(_A ):
__A : List[str] = {0: 'batch', 2: 'past_sequence + sequence'}
__A : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
__A : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
# Generate decoder inputs
__A : Optional[int] = seq_length if not self.use_past else 1
__A : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
__A : Dict = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__A : Optional[int] = dict(**_A , **_A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A , __A : Union[str, Any] = common_inputs['input_ids'].shape
__A : Optional[int] = common_inputs['decoder_input_ids'].shape[1]
__A , __A : Tuple = self.num_attention_heads
__A : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
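                # the tuple above is the per-layer encoder K/V cache shape:
                # (batch, n_heads, encoder_seq_len, head_dim)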
__A : Optional[int] = decoder_seq_length + 3
__A : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__A : str = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_A , _A )] , dim=1 )
__A : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__A , __A : str = self.num_layers
__A : int = min(_A , _A )
__A : Tuple = max(_A , _A ) - min_num_layers
__A : List[str] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_A ):
common_inputs["past_key_values"].append(
(
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
) )
# TODO: test this.
__A : List[str] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_A , _A ):
common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) )
return common_inputs
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
__A : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A , __A : str = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__A : str = seqlen + 2
__A , __A : Any = self.num_layers
__A , __A : Any = self.num_attention_heads
__A : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__A : int = common_inputs['attention_mask'].dtype
__A : Dict = torch.cat(
[common_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
__A : Dict = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A )
]
return common_inputs
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__A : List[Any] = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__A : List[Any] = tokenizer.num_special_tokens_to_add(_A )
__A : Optional[Any] = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
__A : Optional[int] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__A : Optional[int] = dict(tokenizer(_A , return_tensors=_A ) )
return common_inputs
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
if self.task in ["default", "seq2seq-lm"]:
            __A : Tuple = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
elif self.task == "causal-lm":
__A : Dict = self._generate_dummy_inputs_for_causal_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
else:
__A : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
return common_inputs
def UpperCAmelCase_ ( self , _A , _A , _A , _A ):
if self.task in ["default", "seq2seq-lm"]:
__A : Optional[int] = super()._flatten_past_key_values_(_A , _A , _A , _A )
else:
__A : Optional[Any] = super(_A , self )._flatten_past_key_values_(
_A , _A , _A , _A )
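# A minimal, self-contained sketch of the past_key_values layout assumed by the
# shape bookkeeping above: one (key, value) pair per decoder layer for
# self-attention, plus one pair for cross-attention over the encoder states,
# each shaped (batch, num_heads, sequence_length, head_dim). All sizes below
# are illustrative assumptions, not values taken from any particular checkpoint.
import torch

batch, num_heads, hidden_size = 2, 8, 512
encoder_seq_length, decoder_seq_length = 8, 1
head_dim = hidden_size // num_heads
decoder_past_length = decoder_seq_length + 3  # mirrors the padding used above

example_past_layer = (
    torch.zeros(batch, num_heads, decoder_past_length, head_dim),  # self-attn key
    torch.zeros(batch, num_heads, decoder_past_length, head_dim),  # self-attn value
    torch.zeros(batch, num_heads, encoder_seq_length, head_dim),   # cross-attn key
    torch.zeros(batch, num_heads, encoder_seq_length, head_dim),   # cross-attn value
)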
| 280 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : List[str] = '''▁'''
UpperCAmelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = BertGenerationTokenizer
UpperCamelCase : str = False
UpperCamelCase : Tuple = True
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Tuple = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : str = '<s>'
__A : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCAmelCase_ ( self ):
__A : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(_A ) , 1002 )
def UpperCAmelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCAmelCase_ ( self ):
__A : str = BertGenerationTokenizer(_A , keep_accents=_A )
__A : Dict = tokenizer.tokenize('This is a test' )
self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
__A : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__A : Dict = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__A : Optional[int] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase_ ( self ):
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'Hello World!'
__A : Optional[Any] = [18536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCAmelCase_ ( self ):
__A : Dict = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
__A : int = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCAmelCase_ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__A : Tuple = list(self.big_tokenizer.get_vocab().keys() )[:10]
__A : List[Any] = ' '.join(_A )
__A : Union[str, Any] = self.big_tokenizer.encode_plus(_A , return_tensors='pt' , return_token_type_ids=_A )
__A : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=_A )
__A : int = BertGenerationConfig()
__A : List[str] = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCAmelCase_ ( self ):
# fmt: off
__A : str = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 280 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[Any] = '''timm_backbone'''
def __init__( self , _A=None , _A=3 , _A=True , _A=True , _A=None , **_A , ):
super().__init__(**_A )
__A : Any = backbone
__A : Optional[int] = num_channels
__A : List[Any] = features_only
__A : List[str] = use_pretrained_backbone
__A : int = True
__A : List[Any] = out_indices if out_indices is not None else (-1,)
| 280 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _A:
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *_A , **_A ):
pass
def _SCREAMING_SNAKE_CASE ( a ) -> str:
    __A : str = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : Dict = np.array(a )
__A : List[Any] = npimg.shape
return {"hash": hashimage(a ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase : int = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Dict = MaskGenerationPipeline(model=_A , image_processor=_A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase_ ( self , _A , _A ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCAmelCase_ ( self ):
pass
@slow
@require_torch
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
__A : List[str] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
__A : List[Any] = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = 'facebook/sam-vit-huge'
__A : List[str] = pipeline('mask-generation' , model=_A )
__A : Tuple = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__A : List[str] = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
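# The pipeline returns parallel "masks" and "scores" lists. A small
# post-processing sketch (assumption: each mask is a boolean/0-1 numpy array of
# identical shape, as the hashing helper above implies):
import numpy as np

def union_of_masks(outputs, score_threshold=0.95):
    """Merge every mask whose score clears the threshold into one boolean mask."""
    kept = [
        np.asarray(mask, dtype=bool)
        for mask, score in zip(outputs["masks"], outputs["scores"])
        if score >= score_threshold
    ]
    return np.logical_or.reduce(kept) if kept else None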
| 280 | 1 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
UpperCAmelCase : int = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self , _A ):
if isinstance(_A , _A ):
__A : Optional[Any] = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self , _A , _A , _A ):
if len(_A ) == 0 or len(_A ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(_A ) )
if isinstance(_A , _A ):
__A : Optional[int] = [sequences]
__A : Any = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(_A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(snake_case__ )
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A=ZeroShotClassificationArgumentHandler() , *_A , **_A ):
__A : Optional[int] = args_parser
super().__init__(*_A , **_A )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def UpperCAmelCase_ ( self ):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith('entail' ):
return ind
return -1
def UpperCAmelCase_ ( self , _A , _A=True , _A=True , _A=TruncationStrategy.ONLY_FIRST , **_A ):
__A : Union[str, Any] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
' `pad_token=eos_token`' )
__A : Tuple = self.tokenizer.eos_token
try:
__A : Tuple = self.tokenizer(
_A , add_special_tokens=_A , return_tensors=_A , padding=_A , truncation=_A , )
except Exception as e:
if "too short" in str(_A ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__A : Optional[int] = self.tokenizer(
_A , add_special_tokens=_A , return_tensors=_A , padding=_A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def UpperCAmelCase_ ( self , **_A ):
if kwargs.get('multi_class' , _A ) is not None:
__A : Optional[int] = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
__A : int = {}
if "candidate_labels" in kwargs:
__A : Union[str, Any] = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
__A : int = kwargs['hypothesis_template']
__A : Dict = {}
if "multi_label" in kwargs:
__A : Union[str, Any] = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self , _A , *_A , **_A , ):
if len(_A ) == 0:
pass
elif len(_A ) == 1 and "candidate_labels" not in kwargs:
__A : Optional[int] = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(_A , **_A )
def UpperCAmelCase_ ( self , _A , _A=None , _A="This example is {}." ):
__A , __A : Union[str, Any] = self._args_parser(_A , _A , _A )
for i, (candidate_label, sequence_pair) in enumerate(zip(_A , _A ) ):
__A : Union[str, Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_A ) - 1,
**model_input,
}
def UpperCAmelCase_ ( self , _A ):
__A : Dict = inputs['candidate_label']
__A : Optional[Any] = inputs['sequence']
__A : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
__A : Optional[Any] = self.model(**_A )
__A : Optional[Any] = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def UpperCAmelCase_ ( self , _A , _A=False ):
__A : List[Any] = [outputs['candidate_label'] for outputs in model_outputs]
__A : Optional[Any] = [outputs['sequence'] for outputs in model_outputs]
__A : Optional[Any] = np.concatenate([output['logits'].numpy() for output in model_outputs] )
__A : str = logits.shape[0]
__A : Optional[int] = len(_A )
__A : Tuple = N // n
__A : Optional[int] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(_A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__A : Union[str, Any] = self.entailment_id
__A : Tuple = -1 if entailment_id == 0 else 0
__A : Dict = reshaped_outputs[..., [contradiction_id, entailment_id]]
__A : Any = np.exp(_A ) / np.exp(_A ).sum(-1 , keepdims=_A )
__A : List[str] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__A : Any = reshaped_outputs[..., self.entailment_id]
__A : Union[str, Any] = np.exp(_A ) / np.exp(_A ).sum(-1 , keepdims=_A )
__A : Union[str, Any] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
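# Typical usage of this pipeline. The checkpoint name is illustrative — any NLI
# model whose config maps a label starting with "entail" works:
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new GPU doubles our training throughput.",
    candidate_labels=["hardware", "cooking", "politics"],
    multi_label=False,
)
print(result["labels"][0], result["scores"][0])  # highest-scoring label comes first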
| 280 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : List[Any] = tempfile.mkdtemp()
# fmt: off
__A : List[str] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Union[str, Any] = dict(zip(_A , range(len(_A ) ) ) )
__A : Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : int = {'unk_token': '<unk>'}
__A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : List[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : Optional[int] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
        __A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.get_tokenizer()
__A : str = self.get_rust_tokenizer()
__A : List[str] = self.get_image_processor()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : Optional[int] = self.get_image_processor(do_normalize=_A )
__A : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = self.prepare_image_inputs()
__A : int = image_processor(_A , return_tensors='np' )
__A : str = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : str = self.get_image_processor()
__A : str = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : str = 'lower newer'
__A : str = processor(text=_A , return_tensors='np' )
__A : List[str] = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : int = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : List[str] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = 'lower newer'
__A : Optional[Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Any = 'google/owlvit-base-patch32'
__A : int = OwlViTProcessor.from_pretrained(_A )
__A : Dict = ['cat', 'nasa badge']
__A : Optional[Any] = processor(text=_A )
__A : Optional[int] = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : Dict = [['cat', 'nasa badge'], ['person']]
__A : Dict = processor(text=_A )
__A : Optional[int] = 16
__A : Any = len(_A )
__A : Union[str, Any] = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Union[str, Any] = ['cat', 'nasa badge']
__A : Tuple = processor(text=_A )
__A : str = 16
__A : int = inputs['input_ids']
__A : List[Any] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : str = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Tuple = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
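# An end-to-end sketch pairing the processor with the detection model
# (checkpoint and image URL are the ones used in the tests above; the
# post-processing call is a reasonable assumption about the current API,
# not something verified by these tests):
import requests
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
detections = processor.post_process_object_detection(
    outputs=outputs, threshold=0.1, target_sizes=target_sizes
)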
| 280 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = '''roc_bert'''
def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1e-1_2 , _A=True , _A=0 , _A="absolute" , _A=None , _A=True , _A=True , _A=768 , _A=910 , _A=512 , _A=24858 , _A=True , **_A , ):
__A : Optional[Any] = vocab_size
__A : Optional[int] = max_position_embeddings
__A : Optional[int] = hidden_size
__A : Tuple = num_hidden_layers
__A : Optional[Any] = num_attention_heads
__A : int = intermediate_size
__A : Optional[Any] = hidden_act
__A : List[str] = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = initializer_range
__A : List[Any] = type_vocab_size
__A : List[Any] = layer_norm_eps
__A : Tuple = use_cache
__A : Optional[int] = enable_pronunciation
__A : Union[str, Any] = enable_shape
__A : Optional[int] = pronunciation_embed_dim
__A : Union[str, Any] = pronunciation_vocab_size
__A : List[Any] = shape_embed_dim
__A : str = shape_vocab_size
__A : List[str] = concat_input
__A : int = position_embedding_type
__A : List[Any] = classifier_dropout
super().__init__(pad_token_id=_A , **_A )
| 280 |
import math
def sieve(n: int) -> list[int]:
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
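# Quick correctness cross-check against a plain Sieve of Eratosthenes
# (sketch; the bound is kept small so the check runs fast):
def simple_sieve(n: int) -> list[int]:
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(n**0.5) + 1):
        if flags[p]:
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return [i for i, is_prime in enumerate(flags) if is_prime]

assert sieve(10_000) == simple_sieve(10_000)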
| 280 | 1 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(n) for n in a))
if __name__ == "__main__":
    main()
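# Pigeonhole sort runs in O(n + range) time but allocates max - min + 1 holes,
# so it only pays off when the value range is narrow. A non-mutating variant
# for comparison (sketch):
def pigeonhole_sorted(values: list[int]) -> list[int]:
    """Return a sorted copy without modifying the input list."""
    if not values:
        return []
    min_val = min(values)
    holes = [0] * (max(values) - min_val + 1)
    for v in values:
        holes[v - min_val] += 1
    return [i + min_val for i, count in enumerate(holes) for _ in range(count)]

assert pigeonhole_sorted([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]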
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase : Any = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCAmelCase : Optional[int] = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCAmelCase : Union[str, Any] = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def _SCREAMING_SNAKE_CASE ( a , a ) -> List[str]:
with open(a , 'r' , encoding='utf-8' ) as f:
__A : Optional[Any] = json.loads(f.read() )
__A : Dict = collections.OrderedDict()
__A : Optional[int] = collections.OrderedDict()
__A : Optional[int] = collections.OrderedDict()
with open(a , 'r' , encoding='utf-8' ) as f:
__A : Tuple = f.readlines()
__A : str = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(a ):
__A : List[str] = b
__A : Tuple = idx
for wd in b:
__A : Union[str, Any] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = VOCAB_FILES_NAMES
UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , _A , _A , _A="<|endoftext|>" , _A="<|endoftext|>" , _A="<|startoftext|>" , _A="<|endoftext|>" , _A=False , **_A , ):
super().__init__(
unk_token=_A , pad_token=_A , bos_token=_A , eos_token=_A , do_clean_text=_A , **_A , )
if not os.path.isfile(_A ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(_A ):
raise ValueError(
F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
__A : Any = do_clean_text
__A , __A , __A , __A : str = load_vocab_and_emoji(_A , _A )
__A : Dict = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCAmelCase_ ( self ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def UpperCAmelCase_ ( self ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self , _A ):
return self.subword_tokenizer.tokenize(_A , clean=self.do_clean_text )
def UpperCAmelCase_ ( self , _A ):
return self.vocab.get(_A , self.vocab.get(self.unk_token ) )
def UpperCAmelCase_ ( self , _A ):
return self.subword_tokenizer.convert_id_to_token(_A )
def UpperCAmelCase_ ( self , _A ):
__A : List[str] = ''.join(_A ).strip()
return out_string
def UpperCAmelCase_ ( self , _A ):
__A : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
if len(_A ) > self.model_max_length:
__A : Dict = input_ids[-self.model_max_length :]
return input_ids
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : int = 0
if os.path.isdir(_A ):
__A : str = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__A : List[str] = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
__A : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
__A : List[str] = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(_A , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
__A : List[str] = token_index
writer.write(','.join(_A ) + '\n' )
index += 1
with open(_A , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , _A )
return vocab_file, emoji_file
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A , _A ):
__A : List[Any] = vocab # same as swe
__A : int = ids_to_tokens # same as bpe
__A : Any = emoji
__A : Dict = np.max([len(_A ) for w in self.vocab.keys()] )
__A : str = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
__A : Dict = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
__A : Optional[int] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
__A : Any = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
__A : Optional[int] = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
__A : Any = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
__A : Tuple = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
__A : Optional[Any] = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
__A : Optional[Any] = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
def UpperCAmelCase_ ( self , _A ):
__A : int = self.content_repattera.sub('<URL>' , _A )
__A : Optional[int] = self.content_repattera.sub('<EMAIL>' , _A )
__A : int = self.content_repattera.sub('<TEL>' , _A )
__A : str = self.content_repattera.sub('<DATE>' , _A )
__A : Optional[Any] = self.content_repattera.sub('<DATE>' , _A )
__A : List[Any] = self.content_repattera.sub('<PRICE>' , _A )
__A : Dict = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__A : List[str] = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def UpperCAmelCase_ ( self , _A , _A=False ):
__A : Tuple = text.replace(' ' , '<SP>' )
__A : List[str] = text.replace(' ' , '<SP>' )
__A : int = text.replace('\r\n' , '<BR>' )
__A : Optional[Any] = text.replace('\n' , '<BR>' )
__A : Any = text.replace('\r' , '<BR>' )
__A : Tuple = text.replace('\t' , '<TAB>' )
__A : Optional[Any] = text.replace('—' , 'ー' )
__A : str = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
                __A : List[str] = text.replace(k , v )
if clean:
__A : Union[str, Any] = self.clean_text(_A )
def check_simbol(_A ):
__A : List[Any] = x.encode()
if len(_A ) == 1 and len(_A ) == 2:
__A : Optional[int] = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC_2A1 and c <= 0XC_2BF)
or (c >= 0XC_780 and c <= 0XC_783)
or (c >= 0XC_AB9 and c <= 0XC_BBF)
or (c >= 0XC_C80 and c <= 0XC_DA2)
):
return True
return False
def checkuae(_A ):
__A : int = x.encode()
if len(_A ) == 1 and len(_A ) == 3:
__A : int = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE28_080 and c <= 0XE2B_07F:
return True
return False
__A : Union[str, Any] = 0
__A : Optional[int] = []
while pos < len(_A ):
__A : str = min(len(_A ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
__A : List[str] = [] # (token_id, token, pos)
for e in range(_A , _A , -1 ):
__A : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_A ) > 2:
__A : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_A ) > 0:
# the smallest token_id is adopted
                __A , __A , __A : List[str] = sorted(_A , key=lambda x : x[0] )[0]
result.append(_A )
__A : Optional[int] = e
else:
__A : Any = pos + 1
__A : int = text[pos:end]
if check_simbol(_A ):
result.append('<KIGOU>' )
elif checkuae(_A ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
__A : Tuple = end
return result
def UpperCAmelCase_ ( self , _A , _A="\n" ):
__A : Optional[Any] = []
__A : int = []
__A : Any = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_A ) > 0:
words.append(bytearray(_A ).decode('utf-8' , errors='replace' ) )
__A : List[str] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(_A )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(_A )
if len(_A ) > 0:
words.append(bytearray(_A ).decode('utf-8' , errors='replace' ) )
__A : List[Any] = ''.join(_A )
return text
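# A short usage sketch for the tokenizer above (checkpoint name taken from the
# pretrained map earlier in this file; assumes AutoTokenizer resolves to this
# class and that the model files can be downloaded on first use):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer.encode("こんにちは、世界。")
print(ids)
print(tokenizer.decode(ids))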
| 280 |
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
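# Despite its name this is a min/max selection sort, not a true merge sort:
# each pass removes the smallest and largest remaining elements, giving O(n^2)
# behaviour overall. Worked examples:
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert merge_sort([]) == []
assert merge_sort([-2, -5, -45]) == [-45, -5, -2]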
| 280 | 1 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Any = '''▁'''
UpperCAmelCase : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
UpperCAmelCase : Tuple = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
UpperCAmelCase : int = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
UpperCAmelCase : List[Any] = {
'''ernie-m-base''': 5_14,
'''ernie-m-large''': 5_14,
}
UpperCAmelCase : List[str] = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = ["input_ids"]
UpperCamelCase : str = VOCAB_FILES_NAMES
UpperCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : int = RESOURCE_FILES_NAMES
def __init__( self , _A , _A=None , _A=False , _A="utf8" , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A = None , **_A , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__A : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , vocab_file=_A , encoding=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__A : int = do_lower_case
__A : Any = sentencepiece_model_ckpt
__A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__A : Any = self.load_vocab(filepath=_A )
else:
__A : Optional[Any] = {self.sp_model.id_to_piece(_A ): id for id in range(self.sp_model.get_piece_size() )}
__A : Tuple = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase_ ( self , _A ):
if text is None:
return None
__A : List[str] = self.tokenize(_A )
__A , __A : Tuple = '', []
for i, ch in enumerate(_A ):
if ch in self.SP_CHAR_MAPPING:
__A : List[str] = self.SP_CHAR_MAPPING.get(_A )
else:
__A : str = unicodedata.normalize('NFKC' , _A )
if self.is_whitespace(_A ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_A ) )
__A , __A , __A : Dict = normalized_text, [], 0
if self.do_lower_case:
__A : Union[str, Any] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__A : List[str] = token[1:]
__A : Dict = text[offset:].index(_A ) + offset
__A : List[str] = start + len(_A )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__A : str = end
return token_mapping
@property
def UpperCAmelCase_ ( self ):
return len(self.vocab )
def UpperCAmelCase_ ( self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
__A : List[Any] = self.__dict__.copy()
__A : Dict = None
return state
def __setstate__( self , _A ):
__A : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__A : int = {}
__A : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase_ ( self , _A ):
return "".join((self.SP_CHAR_MAPPING.get(_A , _A ) for c in text) )
def UpperCAmelCase_ ( self , _A , _A=False , _A=64 , _A=0.1 ):
if self.sp_model_kwargs.get('enable_sampling' ) is True:
__A : Union[str, Any] = True
if self.sp_model_kwargs.get('alpha' ) is not None:
__A : str = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
__A : Optional[Any] = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
__A : List[Any] = self.sp_model.EncodeAsPieces(_A )
else:
__A : Optional[int] = self.sp_model.SampleEncodeAsPieces(_A , _A , _A )
__A : int = []
for pi, piece in enumerate(_A ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_A ) and pi != 0:
new_pieces.append(_A )
continue
else:
continue
__A : List[str] = 0
for i, chunk in enumerate(_A ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_A ) or self.is_punct(_A ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_A )
__A : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__A : Optional[Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__A : Optional[Any] = i
if len(_A ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase_ ( self , _A ):
__A : List[Any] = ''.join(_A ).replace(_A , ' ' ).strip()
return out_string
def UpperCAmelCase_ ( self , _A ):
__A : Optional[Any] = self.convert_ids_to_tokens(_A )
__A : Tuple = ''.join(_A ).replace(_A , ' ' ).strip()
return out_string
def UpperCAmelCase_ ( self , _A ):
return self.vocab.get(_A , self.vocab.get(self.unk_token ) )
def UpperCAmelCase_ ( self , _A ):
return self.reverse_vocab.get(_A , self.unk_token )
def UpperCAmelCase_ ( self , _A , _A=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : str = [self.cls_token_id]
__A : Union[str, Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase_ ( self , _A , _A=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase_ ( self , _A , _A=None , _A=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1]
def UpperCAmelCase_ ( self , _A , _A = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_A ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_A ) + 1) + [1] * (len(_A ) + 3)
def UpperCAmelCase_ ( self , _A ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase_ ( self , _A ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase_ ( self , _A ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase_ ( self , _A ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_A ) == 1:
__A : Union[str, Any] = unicodedata.category(_A )
if cat == "Zs":
return True
return False
def UpperCAmelCase_ ( self , _A ):
__A : Tuple = {}
with io.open(_A , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_A ):
__A : Optional[Any] = line.rstrip('\n' )
__A : List[str] = int(_A )
return token_to_idx
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : int = 0
if os.path.isdir(_A ):
__A : List[str] = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
__A : Dict = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_A , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
__A : Tuple = token_index
writer.write(token + '\n' )
index += 1
__A : Dict = os.path.join(_A , 'sentencepiece.bpe.model' )
with open(_A , 'wb' ) as fi:
__A : Any = self.sp_model.serialized_model_proto()
fi.write(_A )
return (vocab_file,)
| 280 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
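# Note: the recursion depth equals the input length, so lists longer than
# Python's default recursion limit (about 1000 frames) will raise
# RecursionError. Worked examples:
assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
assert bubble_sort([]) == []
assert bubble_sort([-2, -45, -5]) == [-45, -5, -2]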
| 280 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self , _A ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__A : List[str] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_A )
def UpperCAmelCase_ ( self ):
__A : Any = 'sshleifer/tiny-gpt2'
__A : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_A , multi_process=_A , )
__A : List[str] = TensorFlowBenchmark(_A )
__A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
__A : int = 'sgugger/tiny-distilbert-classification'
__A : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , only_pretrain_model=_A , )
__A : Union[str, Any] = TensorFlowBenchmark(_A )
__A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
__A : Tuple = 'sshleifer/tiny-gpt2'
__A : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__A : List[str] = TensorFlowBenchmark(_A )
__A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
__A : List[str] = 'sshleifer/tiny-gpt2'
__A : Tuple = AutoConfig.from_pretrained(_A )
__A : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_A , multi_process=_A , )
__A : str = TensorFlowBenchmark(_A , [config] )
__A : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
__A : int = 'sshleifer/tiny-gpt2'
__A : int = AutoConfig.from_pretrained(_A )
__A : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__A : Optional[Any] = TensorFlowBenchmark(_A , [config] )
__A : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
__A : Dict = 'sshleifer/tiny-gpt2'
__A : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__A : int = TensorFlowBenchmark(_A )
__A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
__A : str = 'sshleifer/tiny-gpt2'
__A : List[str] = AutoConfig.from_pretrained(_A )
__A : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__A : str = TensorFlowBenchmark(_A , [config] )
__A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'patrickvonplaten/t5-tiny-random'
__A : Any = AutoConfig.from_pretrained(_A )
__A : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__A : int = TensorFlowBenchmark(_A , configs=[config] )
__A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def UpperCAmelCase_ ( self ):
__A : int = 'sshleifer/tiny-gpt2'
__A : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_A , multi_process=_A , )
__A : Dict = TensorFlowBenchmark(_A )
__A : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self ):
__A : Tuple = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_A , save_to_csv=_A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_A , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_A , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_A , 'env.csv' ) , multi_process=_A , )
__A : Optional[int] = TensorFlowBenchmark(_A )
benchmark.run()
self.assertTrue(Path(os.path.join(_A , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_A , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_A , 'env.csv' ) ).exists() )
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_A ):
self.assertTrue(hasattr(_A , 'sequential' ) )
self.assertTrue(hasattr(_A , 'cumulative' ) )
self.assertTrue(hasattr(_A , 'current' ) )
self.assertTrue(hasattr(_A , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__A : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_A , 'log.txt' ) , log_print=_A , trace_memory_line_by_line=_A , eager_mode=_A , multi_process=_A , )
__A : Dict = TensorFlowBenchmark(_A )
__A : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_A , 'log.txt' ) ).exists() )
| 280 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( a ) -> int:
if not nums:
return 0
__A : Optional[int] = nums[0]
__A : str = 0
for num in nums[1:]:
__A , __A : Tuple = (
max_excluding + num,
max(a , a ),
)
return max(a , a )
if __name__ == "__main__":
import doctest
doctest.testmod()
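# The anonymized function above is the classic maximum-sum-of-non-adjacent-elements
# recurrence. A minimal readable sketch of the same logic (illustrative names,
# not part of the row above):
def max_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    # incl: best sum that includes the previous element; excl: best sum that skips it
    incl, excl = nums[0], 0
    for num in nums[1:]:
        incl, excl = excl + num, max(incl, excl)
    return max(incl, excl)

assert max_non_adjacent_sum([3, 2, 7, 10]) == 13  # picks 3 and 10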
| 280 | 1 |
def _SCREAMING_SNAKE_CASE ( a ) -> str:
if number > 0:
raise ValueError('input must be a negative integer' )
__A : Optional[int] = len(bin(a )[3:] )
__A : Dict = bin(abs(a ) - (1 << binary_number_length) )[3:]
__A : int = (
(
'1'
+ '0' * (binary_number_length - len(a ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
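# Worked example (illustrative): for number = -5, bin(-5)[3:] == "101" (3 bits),
# abs(-5) - (1 << 3) == -3 and bin(-3)[3:] == "11", so the sign bit plus zero
# padding yields "0b1011" -- the 4-bit two's complement representation of -5.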
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : Optional[int] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _A:
"""simple docstring"""
def __init__( self , _A ):
if isinstance(_A , _A ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__A : List[Any] = deepcopy(_A )
elif os.path.exists(_A ):
with io.open(_A , 'r' , encoding='utf-8' ) as f:
__A : List[str] = json.load(_A )
else:
try:
__A : Union[str, Any] = baseaa.urlsafe_baadecode(_A ).decode('utf-8' )
__A : List[str] = json.loads(_A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
__A : Dict = config
self.set_stage_and_offload()
def UpperCAmelCase_ ( self ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
__A : Tuple = self.get_value('zero_optimization.stage' , -1 )
# offload
__A : Optional[int] = False
if self.is_zeroa() or self.is_zeroa():
__A : Optional[Any] = set(['cpu', 'nvme'] )
__A : Dict = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
__A : str = True
def UpperCAmelCase_ ( self , _A ):
__A : Tuple = self.config
# find the config node of interest if it exists
__A : Dict = ds_key_long.split('.' )
__A : int = nodes.pop()
for node in nodes:
__A : str = config.get(_A )
if config is None:
return None, ds_key
return config, ds_key
def UpperCAmelCase_ ( self , _A , _A=None ):
__A , __A : int = self.find_config_node(_A )
if config is None:
return default
return config.get(_A , _A )
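# Illustrative walk-through (config contents assumed): for a config like
# {"zero_optimization": {"stage": 3}}, get_value("zero_optimization.stage") splits
# the key on ".", descends one dict per node via find_config_node, and returns 3;
# if an intermediate node is missing, find_config_node returns (None, ds_key)
# and get_value falls back to `default`.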
def UpperCAmelCase_ ( self , _A , _A=False ):
__A : str = self.config
# find the config node of interest if it exists
__A : Tuple = ds_key_long.split('.' )
for node in nodes:
__A : Optional[int] = config
__A : Any = config.get(_A )
if config is None:
if must_exist:
raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(_A )
def UpperCAmelCase_ ( self , _A ):
__A : Tuple = self.get_value(_A )
return False if value is None else bool(_A )
def UpperCAmelCase_ ( self , _A ):
__A : Any = self.get_value(_A )
return False if value is None else not bool(_A )
def UpperCAmelCase_ ( self ):
return self._stage == 2
def UpperCAmelCase_ ( self ):
return self._stage == 3
def UpperCAmelCase_ ( self ):
return self._offload
class _A:
"""simple docstring"""
def __init__( self , _A ):
__A : Union[str, Any] = engine
def UpperCAmelCase_ ( self , _A , **_A ):
# runs backpropagation and handles mixed precision
self.engine.backward(_A , **_A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A ):
super().__init__(_A , device_placement=_A , scaler=_A )
__A : str = hasattr(self.optimizer , 'overflow' )
def UpperCAmelCase_ ( self , _A=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def UpperCAmelCase_ ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def UpperCAmelCase_ ( self ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A ):
super().__init__(_A , _A )
def UpperCAmelCase_ ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _A:
"""simple docstring"""
def __init__( self , _A , _A=0.0_0_1 , _A=0 , **_A ):
__A : Dict = params
__A : int = lr
__A : Optional[int] = weight_decay
__A : Dict = kwargs
class _A:
"""simple docstring"""
def __init__( self , _A , _A=None , _A=0 , **_A ):
__A : str = optimizer
__A : str = total_num_steps
__A : int = warmup_num_steps
__A : Union[str, Any] = kwargs
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> str:
if number > 0:
raise ValueError('input must be a negative integer' )
__A : Optional[int] = len(bin(a )[3:] )
__A : Dict = bin(abs(a ) - (1 << binary_number_length) )[3:]
__A : int = (
(
'1'
+ '0' * (binary_number_length - len(a ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = '''xlm-roberta'''
def __init__( self , _A=30522 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1e-1_2 , _A=1 , _A=0 , _A=2 , _A="absolute" , _A=True , _A=None , **_A , ):
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
__A : List[Any] = vocab_size
__A : Any = hidden_size
__A : List[Any] = num_hidden_layers
__A : Tuple = num_attention_heads
__A : str = hidden_act
__A : Optional[int] = intermediate_size
__A : List[str] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : str = max_position_embeddings
__A : List[str] = type_vocab_size
__A : List[Any] = initializer_range
__A : Dict = layer_norm_eps
__A : Optional[Any] = position_embedding_type
__A : Union[str, Any] = use_cache
__A : Any = classifier_dropout
class _A( snake_case__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
if self.task == "multiple-choice":
__A : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__A : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 280 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> None:
__A : int = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(a ) == len(a ), F"""{len(a )} != {len(a )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
UpperCAmelCase : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
UpperCAmelCase : Optional[int] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _SCREAMING_SNAKE_CASE ( a , a ) -> Dict:
try:
__A : int = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(a ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__A : List[str] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a , a ):
AutoTokenizer.from_pretrained(a ).save_pretrained(a ) # purely for convenience
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a ).eval()
else:
assert isinstance(a , a ), F"""teacher must be a model or string got type {type(a )}"""
__A : int = teacher.config.to_diff_dict()
try:
__A , __A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__A : str = teacher_e
if d is None:
__A : List[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__A , __A : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__A , __A : Optional[int] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__A : int = teacher_e
if d is None:
__A : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a )
# Copy weights
__A : Dict = teacher.config_class(**a )
__A : int = AutoModelForSeqaSeqLM.from_config(a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__A : Any = student.load_state_dict(teacher.state_dict() , strict=a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__A , __A : Optional[int] = list(range(a ) ), list(range(a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
if d_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
try:
if hasattr(
a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a )
copy_layers(teacher.decoder.block , student.decoder.block , a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__A : Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
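# For intuition, the copy maps above spread the copied layers across the teacher,
# trying to keep the first and last layers (see the comment near pick_layers_to_copy).
# Two checks straight from the tables:
# LAYERS_TO_COPY[12][3] == [0, 6, 11]      # 3-layer student of a 12-layer (BART) teacher
# LAYERS_TO_COPY[16][4] == [0, 5, 10, 15]  # 4-layer student of a 16-layer (Pegasus) teacher
# When no mapping exists, the helper warns and falls back to list(range(n_student)),
# i.e. copying the first n_student teacher layers.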
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase : str = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> list[int]:
__A : Optional[int] = int(a )
# Initialize Result
__A : Optional[int] = []
# Traverse through all denomination
for denomination in reversed(a ):
# Find denominations
while int(a ) >= int(a ):
total_value -= int(a )
answer.append(a ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
UpperCAmelCase : List[str] = []
UpperCAmelCase : Optional[int] = '''0'''
if (
input('''Do you want to enter your denominations? (y/n): ''').strip().lower()
== "y"
):
UpperCAmelCase : List[Any] = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
UpperCAmelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase : Optional[int] = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCAmelCase : Tuple = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
UpperCAmelCase : Optional[int] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
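# Illustrative check of the greedy strategy with the default denominations above:
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# Note: greedy change-making is only guaranteed optimal for canonical coin systems
# such as this one; arbitrary denominations can require dynamic programming.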
| 280 | 1 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def _SCREAMING_SNAKE_CASE ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 280 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : List[Any] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__A : Union[str, Any] = parent
__A : Optional[int] = batch_size
__A : int = num_channels
__A : int = min_resolution
__A : Any = max_resolution
__A : List[Any] = do_resize
__A : List[Any] = size
__A : Union[str, Any] = do_normalize
__A : Optional[int] = image_mean
__A : Optional[int] = image_std
__A : int = do_rescale
__A : str = rescale_factor
__A : Tuple = do_pad
def UpperCAmelCase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self , _A , _A=False ):
if not batched:
__A : List[str] = image_inputs[0]
if isinstance(_A , Image.Image ):
__A , __A : int = image.size
else:
__A , __A : Any = image.shape[1], image.shape[2]
if w < h:
__A : List[Any] = int(self.size['shortest_edge'] * h / w )
__A : List[Any] = self.size['shortest_edge']
elif w > h:
__A : Union[str, Any] = self.size['shortest_edge']
__A : str = int(self.size['shortest_edge'] * w / h )
else:
__A : Dict = self.size['shortest_edge']
__A : str = self.size['shortest_edge']
else:
__A : int = []
for image in image_inputs:
__A , __A : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : List[str] = max(_A , key=lambda _A : item[0] )[0]
__A : str = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
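# Illustrative example (values assumed): a PIL image with width 400 and height 600
# and size {"shortest_edge": 18} resizes to width 18 and height int(18 * 600 / 400) = 27,
# i.e. the shorter side is pinned to 18 while the aspect ratio is preserved.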
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : Dict = YolosImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _A )
__A : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _A )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
__A : str = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[int] = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processings
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
__A : Any = self.image_processing_class(do_resize=_A , do_normalize=_A , do_rescale=_A )
# create random PyTorch tensors
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__A : Optional[int] = image_processing_a.pad(_A , return_tensors='pt' )
__A : Optional[int] = image_processing_a(_A , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image and target
__A : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__A : Optional[Any] = json.loads(f.read() )
__A : Optional[Any] = {'image_id': 39769, 'annotations': target}
# encode them
__A : str = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
__A : List[Any] = image_processing(images=_A , annotations=_A , return_tensors='pt' )
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify orig_size
__A : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image, target and masks_path
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__A : Tuple = json.loads(f.read() )
__A : Any = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
__A : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__A : Any = YolosImageProcessor(format='coco_panoptic' )
__A : List[Any] = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='pt' )
# verify pixel values
__A : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify masks
__A : Tuple = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _A )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
| 280 | 1 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = '''encodec'''
def __init__( self , _A=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , _A=24000 , _A=1 , _A=False , _A=None , _A=None , _A=128 , _A=32 , _A=1 , _A=[8, 5, 4, 2] , _A="weight_norm" , _A=7 , _A=7 , _A=3 , _A=2 , _A=True , _A="reflect" , _A=2 , _A=2 , _A=1.0 , _A=1024 , _A=None , _A=True , **_A , ):
__A : Any = target_bandwidths
__A : List[Any] = sampling_rate
__A : List[str] = audio_channels
__A : int = normalize
__A : int = chunk_length_s
__A : str = overlap
__A : Optional[Any] = hidden_size
__A : Dict = num_filters
__A : Dict = num_residual_layers
__A : List[Any] = upsampling_ratios
__A : Dict = norm_type
__A : Dict = kernel_size
__A : Optional[Any] = last_kernel_size
__A : List[Any] = residual_kernel_size
__A : List[Any] = dilation_growth_rate
__A : Tuple = use_causal_conv
__A : str = pad_mode
__A : Optional[int] = compress
__A : str = num_lstm_layers
__A : Dict = trim_right_ratio
__A : Tuple = codebook_size
__A : Optional[Any] = codebook_dim if codebook_dim is not None else hidden_size
__A : Tuple = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**_A )
@property
def UpperCAmelCase_ ( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCAmelCase_ ( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCAmelCase_ ( self ):
__A : Tuple = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCAmelCase_ ( self ):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
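# Illustrative arithmetic with the 24 kHz defaults above: the hop length is
# prod([8, 5, 4, 2]) = 320 samples, so frame_rate = ceil(24000 / 320) = 75; with a
# top target bandwidth of 24.0 kbps, num_quantizers = 1000 * 24 // (75 * 10) = 32.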
| 280 |
import argparse
import json
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=a , help='where to store parsed gold_data_path file' , )
__A : Optional[int] = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
__A : List[Any] = json.load(a )
for dpr_record in tqdm(a ):
__A : Dict = dpr_record['question']
__A : Any = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(a ) + '\n' )
if __name__ == "__main__":
main()
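# Output shape (derived from the loop above): the evaluation_set file holds one
# question per line, and the gold_data_path file holds the tab-joined titles of
# that question's positive contexts, line-aligned with the questions.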
| 280 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase : Any = 16
UpperCAmelCase : Any = 32
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a = 16 ) -> Any:
__A : List[Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
__A : int = DatasetDict(
{
'train': dataset['train'].select(a ),
'validation': dataset['train'].select(a ),
'test': dataset['validation'],
} )
def tokenize_function(a ):
# max_length=None => use the model max length (it's actually the default)
__A : Optional[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a , max_length=a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__A : int = datasets.map(
a , batched=a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__A : int = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__A : Any = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__A : List[str] = 16
elif accelerator.mixed_precision != "no":
__A : List[str] = 8
else:
__A : List[Any] = None
return tokenizer.pad(
a , padding='longest' , max_length=a , pad_to_multiple_of=a , return_tensors='pt' , )
# Instantiate dataloaders.
__A : Optional[Any] = DataLoader(
tokenized_datasets['train'] , shuffle=a , collate_fn=a , batch_size=a )
__A : Optional[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=a , collate_fn=a , batch_size=a )
__A : Optional[Any] = DataLoader(
tokenized_datasets['test'] , shuffle=a , collate_fn=a , batch_size=a )
return train_dataloader, eval_dataloader, test_dataloader
def _SCREAMING_SNAKE_CASE ( a , a ) -> int:
# New Code #
__A : Tuple = []
# Download the dataset
__A : List[Any] = load_dataset('glue' , 'mrpc' )
# Create our splits
__A : Optional[int] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
__A : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__A : Tuple = config['lr']
__A : str = int(config['num_epochs'] )
__A : Any = int(config['seed'] )
__A : Optional[Any] = int(config['batch_size'] )
__A : str = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
__A : Optional[int] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__A : Tuple = batch_size // MAX_GPU_BATCH_SIZE
__A : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(a )
# New Code #
# Create our folds:
__A : Optional[Any] = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
__A : int = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(a ):
__A , __A , __A : Optional[Any] = get_fold_dataloaders(
a , a , a , a , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__A : Any = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__A : str = model.to(accelerator.device )
# Instantiate optimizer
__A : Tuple = AdamW(params=model.parameters() , lr=a )
# Instantiate scheduler
__A : Optional[int] = get_linear_schedule_with_warmup(
optimizer=a , num_warmup_steps=1_00 , num_training_steps=(len(a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__A , __A , __A , __A , __A : int = accelerator.prepare(
a , a , a , a , a )
# Now we train the model
for epoch in range(a ):
model.train()
for step, batch in enumerate(a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__A : Optional[Any] = model(**a )
__A : Optional[int] = outputs.loss
__A : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__A : List[str] = model(**a )
__A : Any = outputs.logits.argmax(dim=-1 )
__A , __A : Dict = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=a , references=a , )
__A : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , a )
# New Code #
# We also run predictions on the test set at the very end
__A : Any = []
for step, batch in enumerate(a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__A : Dict = model(**a )
__A : str = outputs.logits
__A , __A : List[str] = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(a , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
__A : List[Any] = torch.cat(a , dim=0 )
__A : int = torch.stack(a , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
__A : Any = metric.compute(predictions=a , references=a )
accelerator.print('Average test metrics from all folds:' , a )
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
__A : str = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=a , default=a , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=a , default=3 , help='The number of splits to perform across the dataset' )
__A : List[Any] = parser.parse_args()
__A : List[str] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(a , a )
if __name__ == "__main__":
main()
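# The fold ensembling above sums the per-fold logits, divides by the number of
# folds, and takes the argmax -- i.e. it averages raw logits before predicting.
# A minimal standalone sketch of that reduction (shapes are illustrative):
import torch

fold_logits = [torch.randn(100, 2) for _ in range(3)]    # one (N, num_labels) tensor per fold
ensembled = torch.stack(fold_logits, dim=0).mean(dim=0)  # average over folds -> (N, num_labels)
predictions = ensembled.argmax(dim=-1)                   # final ensemble prediction -> (N,)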
| 280 |
from heapq import heappop, heappush
import numpy as np
def _SCREAMING_SNAKE_CASE ( a , a , a , a , ) -> tuple[float | int, list[tuple[int, int]]]:
__A , __A : int = grid.shape
__A : Any = [-1, 1, 0, 0]
__A : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__A , __A : Optional[int] = [(0, source)], set()
__A : Any = np.full((rows, cols) , np.inf )
__A : Any = 0
__A : Any = np.empty((rows, cols) , dtype=a )
__A : Optional[Any] = None
while queue:
((__A) , (__A)) : List[str] = heappop(a )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__A : int = []
while (x, y) != source:
path.append((x, y) )
__A , __A : Optional[int] = predecessors[x, y]
path.append(a ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(a ) ):
__A , __A : Union[str, Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__A : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(a , (dist + 1, (nx, ny)) )
__A : List[Any] = dist + 1
__A : Union[str, Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
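# Usage sketch (the function above is anonymized; the name below is assumed).
# Cells equal to 1 are passable, and the returned distance is matrix[destination]:
# import numpy as np
# grid = np.array([[1, 1, 1],
#                  [0, 1, 0],
#                  [1, 1, 1]])
# dist, path = grid_shortest_path(grid, (0, 0), (2, 2), allow_diagonal=False)
# dist == 4.0 and path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]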
| 280 | 1 |
from maths.prime_check import is_prime
def _SCREAMING_SNAKE_CASE ( a ) -> int:
if not isinstance(a , a ):
__A : Optional[Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(a )
if is_prime(a ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
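# Illustrative checks (the function above is anonymized; call it twin_prime):
# twin_prime(3) -> 5    (3 and 5 are both prime)
# twin_prime(4) -> -1   (4 is not prime)
# twin_prime(7) -> -1   (7 is prime but 9 is not)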
| 280 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def _SCREAMING_SNAKE_CASE ( a , a , a=8 ) -> Tuple:
__A : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__A : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
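# Illustrative arithmetic (factor value assumed): with height = width = 512 and
# scale_factor = 8, 512 // 8**2 == 8 with no remainder, so the function returns
# (64, 64) -- the latent resolution corresponding to a 512x512 image.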
def _SCREAMING_SNAKE_CASE ( a , a=5_12 , a=5_12 ) -> int:
__A : Optional[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
__A : Union[str, Any] = np.array(pil_image.convert('RGB' ) )
__A : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
__A : int = np.transpose(a , [2, 0, 1] )
__A : Tuple = torch.from_numpy(a ).unsqueeze(0 )
return image
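# Note: arr / 127.5 - 1 maps uint8 pixel values from [0, 255] into [-1, 1], and
# the transpose reorders HWC -> CHW before unsqueeze adds the batch dimension.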
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A , _A , ):
super().__init__()
self.register_modules(
unet=_A , scheduler=_A , movq=_A , )
__A : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self , _A , _A , _A ):
# get the original timestep using init_timestep
__A : Optional[int] = min(int(num_inference_steps * strength ) , _A )
__A : Dict = max(num_inference_steps - init_timestep , 0 )
__A : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
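# Illustrative example: with num_inference_steps = 100 and strength = 0.3,
# init_timestep = 30 and t_start = 70, so only the last 30 scheduler timesteps
# are run -- lower strength preserves more of the input image.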
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A=None ):
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}""" )
__A : Union[str, Any] = image.to(device=_A , dtype=_A )
__A : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__A : int = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(_A , _A ):
__A : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
__A : str = torch.cat(_A , dim=0 )
else:
__A : List[str] = self.movq.encode(_A ).latent_dist.sample(_A )
__A : Tuple = self.movq.config.scaling_factor * init_latents
__A : Optional[int] = torch.cat([init_latents] , dim=0 )
__A : Union[str, Any] = init_latents.shape
__A : List[str] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
__A : Optional[Any] = self.scheduler.add_noise(_A , _A , _A )
__A : Optional[int] = init_latents
return latents
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__A : Optional[int] = torch.device(F"""cuda:{gpu_id}""" )
__A : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__A : List[Any] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__A : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
__A , __A : Optional[int] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
__A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
def __call__( self , _A , _A , _A , _A = 512 , _A = 512 , _A = 100 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ):
__A : List[Any] = self._execution_device
__A : Optional[Any] = guidance_scale > 1.0
if isinstance(_A , _A ):
__A : Optional[Any] = torch.cat(_A , dim=0 )
__A : Tuple = image_embeds.shape[0]
if isinstance(_A , _A ):
__A : List[Any] = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
__A : Union[str, Any] = image_embeds.repeat_interleave(_A , dim=0 )
__A : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
__A : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
__A : List[Any] = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
__A : Dict = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
__A : Any = image.to(dtype=image_embeds.dtype , device=_A )
__A : Tuple = self.movq.encode(_A )['latents']
__A : int = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
__A , __A : int = self.get_timesteps(_A , _A , _A )
__A : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
__A , __A : Any = downscale_height_and_width(_A , _A , self.movq_scale_factor )
__A : Tuple = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__A : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A : Dict = {'image_embeds': image_embeds}
__A : List[str] = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
__A , __A : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__A , __A : Optional[Any] = noise_pred.chunk(2 )
__A , __A : List[str] = variance_pred.chunk(2 )
__A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__A : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__A , __A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__A : List[str] = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
__A : List[Any] = self.movq.decode(_A , force_not_quantize=_A )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__A : List[str] = image * 0.5 + 0.5
__A : List[str] = image.clamp(0 , 1 )
__A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__A : Any = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase : Optional[Any] = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[Any]:
__A : Any = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__A : str = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__A : Optional[int] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=a , output_all_encodings=a , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , a ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__A : Union[str, Any] = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__A : Any = os.path.join(get_home_dir() , 'models' )
__A : List[Any] = _load_vocab(a , a , a , cls=a )
__A : Dict = nlp.model.BERTModel(
a , len(a ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=a , use_token_type_embed=a , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=a , use_decoder=a , )
original_bort.load_parameters(a , cast_dtype=a , ignore_extra=a )
__A : Union[str, Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
__A : Any = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(a ),
}
__A : int = BertConfig.from_dict(a )
__A : Union[str, Any] = BertForMaskedLM(a )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(a ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(a , a ):
__A : Tuple = hf_param.shape
__A : str = to_torch(params[gluon_param] )
__A : Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
__A : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__A : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__A : Tuple = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__A : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__A : BertSelfAttention = layer.attention.self
__A : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
__A : Optional[Any] = check_and_map_params(
self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
__A : BertSelfOutput = layer.attention.output
__A : Tuple = check_and_map_params(
self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
__A : int = check_and_map_params(
self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
__A : List[Any] = check_and_map_params(
self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
__A : str = check_and_map_params(
self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
__A : BertIntermediate = layer.intermediate
__A : int = check_and_map_params(
intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
__A : List[Any] = check_and_map_params(
intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
__A : BertOutput = layer.output
__A : List[Any] = check_and_map_params(
bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
__A : Dict = check_and_map_params(
bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
__A : Optional[int] = check_and_map_params(
bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
__A : Dict = check_and_map_params(
bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__A : Any = RobertaTokenizer.from_pretrained('roberta-base' )
__A : List[str] = tokenizer.encode_plus(a )['input_ids']
# Get gluon output
__A : List[str] = mx.nd.array([input_ids] )
__A : Union[str, Any] = original_bort(inputs=a , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(a )
__A : Optional[Any] = BertModel.from_pretrained(a )
hf_bort_model.eval()
__A : Tuple = tokenizer.encode_plus(a , return_tensors='pt' )
__A : Any = hf_bort_model(**a )[0]
__A : Union[str, Any] = output_gluon[0].asnumpy()
__A : Tuple = output_hf[0].detach().numpy()
__A : int = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__A : int = np.allclose(a , a , atol=1e-3 )
if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:' , a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase : Dict = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
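# Example invocation (illustrative; the script file name is assumed, but the two flags
# are exactly the ones registered above):
#   python convert_bort_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path /path/to/output_dir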
| 280 | 1 |
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[Any]:
__A : Tuple = [0] * len(a )
__A : Tuple = []
__A : Dict = [1] * len(a )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(a ) ):
if indegree[i] == 0:
queue.append(a )
while queue:
__A : int = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__A : Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(a )
print(max(a ) )
# Adjacency list of Graph
UpperCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
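# Worked example (added for illustration): with the adjacency list above, the call
# prints 5: the longest path visits five vertices, e.g. 0 -> 2 -> 5 -> 6 -> 7.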
| 280 |
import colorsys
from PIL import Image # type: ignore
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
__A : List[str] = x
__A : str = y
for step in range(a ): # noqa: B007
__A : Union[str, Any] = a * a - b * b + x
__A : Optional[int] = 2 * a * b + y
__A : List[str] = a_new
        # divergence is guaranteed once the absolute value of the iterate
        # exceeds 2, which is checked below on the squared magnitude
        if a * a + b * b > 4:
break
return step / (max_step - 1)
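# Worked examples for the function above (called as `get_distance` below): at (0, 0)
# the iterate never escapes, the loop runs all `max_step` steps and the result is 1.0;
# at (2, 2) the first iterate already has a*a + b*b = 104 > 4, so the result is 0.0.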
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(a , 1 , 1 ) )
def _SCREAMING_SNAKE_CASE ( a = 8_00 , a = 6_00 , a = -0.6 , a = 0 , a = 3.2 , a = 50 , a = True , ) -> Image.Image:
__A : str = Image.new('RGB' , (image_width, image_height) )
__A : Dict = img.load()
# loop through the image-coordinates
for image_x in range(a ):
for image_y in range(a ):
# determine the figure-coordinates based on the image-coordinates
__A : Dict = figure_width / image_width * image_height
__A : Union[str, Any] = figure_center_x + (image_x / image_width - 0.5) * figure_width
__A : Optional[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
__A : Union[str, Any] = get_distance(a , a , a )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__A : Optional[Any] = get_color_coded_rgb(a )
else:
__A : Dict = get_black_and_white_rgb(a )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
UpperCAmelCase : str = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 280 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
UpperCAmelCase : Any = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
UpperCAmelCase : Tuple = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = '''maskformer'''
UpperCamelCase : Any = {'''hidden_size''': '''mask_feature_size'''}
UpperCamelCase : List[Any] = ['''resnet''', '''swin''']
UpperCamelCase : Any = ['''detr''']
def __init__( self , _A = 256 , _A = 256 , _A = 0.1 , _A = False , _A = None , _A = None , _A = 0.0_2 , _A = 1.0 , _A = 1.0 , _A = 1.0 , _A = 2_0.0 , _A = None , **_A , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__A : Union[str, Any] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_A , _A ):
__A : Tuple = backbone_config.pop('model_type' )
__A : Optional[int] = CONFIG_MAPPING[backbone_model_type]
__A : Dict = config_class.from_dict(_A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__A : int = DetrConfig()
else:
# verify that the decoder is supported
__A : Dict = (
decoder_config.pop('model_type' ) if isinstance(_A , _A ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"""Transformer Decoder {decoder_type} not supported, please use one of"""
F""" {",".join(self.decoders_supported )}""" )
if isinstance(_A , _A ):
__A : Tuple = CONFIG_MAPPING[decoder_type]
__A : Optional[int] = config_class.from_dict(_A )
__A : Dict = backbone_config
__A : List[str] = decoder_config
# main feature dimension for the model
__A : Optional[Any] = fpn_feature_size
__A : int = mask_feature_size
# initializer
__A : Any = init_std
__A : Union[str, Any] = init_xavier_std
# Hungarian matcher && loss
__A : Any = cross_entropy_weight
__A : Any = dice_weight
__A : List[str] = mask_weight
__A : Dict = use_auxiliary_loss
__A : int = no_object_weight
__A : Any = output_auxiliary_logits
__A : Any = self.decoder_config.encoder_attention_heads
__A : Optional[int] = self.decoder_config.num_hidden_layers
super().__init__(**_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , _A , **_A ):
return cls(
backbone_config=_A , decoder_config=_A , **_A , )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = copy.deepcopy(self.__dict__ )
__A : Dict = self.backbone_config.to_dict()
__A : List[Any] = self.decoder_config.to_dict()
__A : Optional[Any] = self.__class__.model_type
return output
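# Minimal usage sketch (added for illustration; the class above corresponds to
# transformers' MaskFormerConfig, as the checkpoint map at the top suggests):
if __name__ == "__main__":
    from transformers import MaskFormerConfig

    config = MaskFormerConfig()  # default Swin backbone + DETR decoder, as wired above
    print(config.backbone_config.model_type)  # "swin"
    print(config.decoder_config.model_type)   # "detr"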
| 280 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def _SCREAMING_SNAKE_CASE ( a , a , a , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _SCREAMING_SNAKE_CASE ( a , a , a , ) -> float:
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return compound_interest(
a , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
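# Worked examples (added for illustration; the three functions correspond to
# simple_interest, compound_interest and apr_interest, as the internal call suggests,
# with the assumed argument order principal, rate, periods):
#   simple_interest(500, 0.01, 10)      -> 50.0     (500 * 0.01 * 10)
#   compound_interest(10_000, 0.05, 3)  -> 1576.25  (10_000 * (1.05 ** 3 - 1))
#   apr_interest(10_000, 0.05, 1)       == compound_interest(10_000, 0.05 / 365, 365)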
| 280 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCAmelCase : int = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , _A , )
super().__init__(*_A , **_A )
| 280 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase : Any = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class _A( enum.Enum ):
"""simple docstring"""
UpperCamelCase : List[Any] = 0
UpperCamelCase : List[Any] = 1
@add_end_docstrings(snake_case__ )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = '''generated'''
def __init__( self , *_A , **_A ):
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase_ ( self , _A=None , _A=None , _A=None , _A=None , _A=None , _A=None , **_A , ):
__A : Union[str, Any] = {}
if truncation is not None:
__A : Any = truncation
__A : str = generate_kwargs
__A : List[Any] = {}
if return_tensors is not None and return_type is None:
__A : Optional[int] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__A : List[str] = return_type
if clean_up_tokenization_spaces is not None:
__A : str = clean_up_tokenization_spaces
if stop_sequence is not None:
__A : Optional[Any] = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__A : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase_ ( self , _A , _A , _A ):
return True
def UpperCAmelCase_ ( self , *_A , _A ):
__A : Any = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
__A : Any = ([prefix + arg for arg in args[0]],)
__A : List[str] = True
elif isinstance(args[0] , _A ):
__A : Optional[int] = (prefix + args[0],)
__A : Tuple = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
__A : Tuple = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *_A , **_A ):
__A : Optional[int] = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase_ ( self , _A , _A=TruncationStrategy.DO_NOT_TRUNCATE , **_A ):
__A : List[Any] = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def UpperCAmelCase_ ( self , _A , **_A ):
if self.framework == "pt":
__A , __A : Optional[Any] = model_inputs['input_ids'].shape
elif self.framework == "tf":
__A , __A : Tuple = tf.shape(model_inputs['input_ids'] ).numpy()
__A : str = generate_kwargs.get('min_length' , self.model.config.min_length )
__A : List[str] = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
__A : Tuple = self.model.generate(**_A , **_A )
__A : str = output_ids.shape[0]
if self.framework == "pt":
__A : Any = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
__A : Optional[Any] = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase_ ( self , _A , _A=ReturnType.TEXT , _A=False ):
__A : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__A : str = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
__A : Optional[Any] = {
F"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(snake_case__ )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = '''summary'''
def __call__( self , *_A , **_A ):
return super().__call__(*_A , **_A )
def UpperCAmelCase_ ( self , _A , _A , _A ):
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(snake_case__ )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = '''translation'''
def UpperCAmelCase_ ( self , _A , _A , _A ):
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def UpperCAmelCase_ ( self , *_A , _A=TruncationStrategy.DO_NOT_TRUNCATE , _A=None , _A=None ):
if getattr(self.tokenizer , '_build_translation_inputs' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def UpperCAmelCase_ ( self , _A=None , _A=None , **_A ):
__A , __A , __A : int = super()._sanitize_parameters(**_A )
if src_lang is not None:
__A : List[str] = src_lang
if tgt_lang is not None:
__A : Optional[int] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__A : int = kwargs.get('task' , self.task )
__A : List[str] = task.split('_' )
if task and len(_A ) == 4:
# translation, XX, to YY
__A : str = items[1]
__A : Optional[int] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *_A , **_A ):
return super().__call__(*_A , **_A )
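# Minimal usage sketch (added for illustration; these classes back the "summarization"
# and "translation_xx_to_yy" tasks of `transformers.pipeline`, the supported entry point):
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
#   translator = pipeline("translation_en_to_fr")
#   translator("How old are you?")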
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> bool:
return str(a ) == str(a )[::-1]
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return int(a ) + int(str(a )[::-1] )
def _SCREAMING_SNAKE_CASE ( a = 1_00_00 ) -> int:
__A : int = []
for num in range(1 , a ):
__A : List[str] = 0
__A : List[Any] = num
while iterations < 50:
__A : str = sum_reverse(a )
iterations += 1
if is_palindrome(a ):
break
else:
lychrel_nums.append(a )
return len(a )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 280 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCAmelCase : Dict = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , _A , )
super().__init__(*_A , **_A )
| 280 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _A:
"""simple docstring"""
def __init__( self , _A = None ):
if components is None:
__A : int = []
__A : Tuple = list(_A )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(_A , self.__components ) ) + ")"
def __add__( self , _A ):
__A : Optional[int] = len(self )
if size == len(_A ):
__A : Any = [self.__components[i] + other.component(_A ) for i in range(_A )]
return Vector(_A )
else:
raise Exception('must have the same size' )
def __sub__( self , _A ):
__A : Tuple = len(self )
if size == len(_A ):
__A : Union[str, Any] = [self.__components[i] - other.component(_A ) for i in range(_A )]
return Vector(_A )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , (float, int) ):
__A : str = [c * other for c in self.__components]
return Vector(_A )
elif isinstance(_A , _A ) and len(self ) == len(_A ):
__A : Union[str, Any] = len(self )
__A : Dict = [self.__components[i] * other.component(_A ) for i in range(_A )]
return sum(_A )
else: # error case
raise Exception('invalid operand!' )
def UpperCAmelCase_ ( self ):
return Vector(self.__components )
def UpperCAmelCase_ ( self , _A ):
if isinstance(_A , _A ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCAmelCase_ ( self , _A , _A ):
assert -len(self.__components ) <= pos < len(self.__components )
__A : Optional[int] = value
def UpperCAmelCase_ ( self ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__A : Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(_A ) )
def UpperCAmelCase_ ( self , _A , _A = False ):
__A : Optional[Any] = self * other
__A : Optional[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _SCREAMING_SNAKE_CASE ( a ) -> Vector:
assert isinstance(a , a )
return Vector([0] * dimension )
def _SCREAMING_SNAKE_CASE ( a , a ) -> Vector:
assert isinstance(a , a ) and (isinstance(a , a ))
__A : Optional[Any] = [0] * dimension
__A : Tuple = 1
return Vector(a )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
assert (
isinstance(a , a )
and isinstance(a , a )
and (isinstance(a , (int, float) ))
)
return x * scalar + y
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
random.seed(a )
__A : str = [random.randint(a , a ) for _ in range(a )]
return Vector(a )
class _A:
"""simple docstring"""
def __init__( self , _A , _A , _A ):
__A : Optional[Any] = matrix
__A : Dict = w
__A : Optional[int] = h
def __str__( self ):
__A : Tuple = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Optional[Any] = []
for i in range(self.__height ):
__A : Optional[Any] = [
self.__matrix[i][j] + other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Tuple = []
for i in range(self.__height ):
__A : str = [
self.__matrix[i][j] - other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , _A ): # matrix-vector
if len(_A ) == self.__width:
__A : List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
__A : List[str] = [
self.__matrix[i][j] * other.component(_A )
for j in range(self.__width )
]
ans.change_component(_A , sum(_A ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_A , (int, float) ): # matrix-scalar
__A : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_A , self.__width , self.__height )
return None
def UpperCAmelCase_ ( self ):
return self.__height
def UpperCAmelCase_ ( self ):
return self.__width
def UpperCAmelCase_ ( self , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
__A : int = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__A : List[str] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_A ) ):
__A : Optional[int] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_A , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_A , _A )
else:
raise Exception('Indices out of bounds' )
def UpperCAmelCase_ ( self ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__A : List[str] = [
self.__matrix[0][y] * self.cofactor(0 , _A ) for y in range(self.__width )
]
return sum(_A )
def _SCREAMING_SNAKE_CASE ( a ) -> Matrix:
__A : list[list[float]] = [[0] * n for _ in range(a )]
return Matrix(a , a , a )
def _SCREAMING_SNAKE_CASE ( a , a , a , a ) -> Matrix:
random.seed(a )
__A : list[list[float]] = [
[random.randint(a , a ) for _ in range(a )] for _ in range(a )
]
return Matrix(a , a , a )
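# Minimal usage sketch (added for illustration; the two classes above are the Vector
# and Matrix that the module-level helpers reference):
#   v, w = Vector([1, 2, 3]), Vector([4, 5, 6])
#   v * w                              -> 32   (dot product 1*4 + 2*5 + 3*6)
#   Vector([3, 4]).euclidean_length()  -> 5.0
#   m = Matrix([[1, 2], [3, 4]], 2, 2) # (matrix, width, height)
#   m.determinant()                    -> -2   (1*4 - 2*3)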
| 280 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
UpperCAmelCase : str = random.Random()
def _SCREAMING_SNAKE_CASE ( a , a=1.0 , a=None , a=None ) -> Optional[int]:
if rng is None:
__A : Dict = global_rng
__A : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=400 , _A=2000 , _A=1 , _A=0.0 , _A=16000 , _A=True , _A=80 , _A=16 , _A=64 , _A="hann_window" , _A=80 , _A=7600 , _A=1e-1_0 , _A=True , ):
__A : Optional[Any] = parent
__A : Any = batch_size
__A : Tuple = min_seq_length
__A : str = max_seq_length
__A : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__A : List[str] = feature_size
__A : int = padding_value
__A : Optional[Any] = sampling_rate
__A : Any = do_normalize
__A : List[str] = num_mel_bins
__A : int = hop_length
__A : Tuple = win_length
__A : Union[str, Any] = win_function
__A : Union[str, Any] = fmin
__A : int = fmax
__A : Union[str, Any] = mel_floor
__A : Optional[int] = return_attention_mask
def UpperCAmelCase_ ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def UpperCAmelCase_ ( self , _A=False , _A=False ):
def _flatten(_A ):
return list(itertools.chain(*_A ) )
if equal_length:
__A : int = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__A : int = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__A : Optional[int] = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
def UpperCAmelCase_ ( self , _A=False , _A=False ):
if equal_length:
__A : List[Any] = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__A : Dict = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__A : Optional[Any] = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = SpeechTaFeatureExtractor
def UpperCAmelCase_ ( self ):
__A : Tuple = SpeechTaFeatureExtractionTester(self )
def UpperCAmelCase_ ( self , _A ):
self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1e-3 ) )
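    # The helper above encodes the zero-mean/unit-variance property under test: after
    # normalization each feature dimension should satisfy mean(x) ~ 0 and var(x) ~ 1,
    # i.e. roughly x_norm = (x - mean(x)) / sqrt(var(x) + eps) applied per utterance.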
def UpperCAmelCase_ ( self ):
        # Test that all calls wrap to encode_plus and batch_encode_plus
__A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__A : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__A : Optional[Any] = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
__A : List[str] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
__A : Any = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test batched
__A : List[str] = feat_extract(_A , return_tensors='np' ).input_values
__A : int = feat_extract(_A , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
def UpperCAmelCase_ ( self ):
__A : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__A : Union[str, Any] = ['longest', 'max_length', 'do_not_pad']
__A : List[str] = [None, 1600, None]
for max_length, padding in zip(_A , _A ):
__A : Tuple = feat_extract(_A , padding=_A , max_length=_A , return_tensors='np' )
__A : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[1][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCAmelCase_ ( self ):
__A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : int = range(800 , 1400 , 200 )
__A : Optional[Any] = [floats_list((1, x) )[0] for x in lengths]
__A : Any = ['longest', 'max_length', 'do_not_pad']
__A : int = [None, 1600, None]
for max_length, padding in zip(_A , _A ):
__A : List[str] = feat_extract(_A , max_length=_A , padding=_A )
__A : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__A : Dict = feat_extract(
_A , truncation=_A , max_length=1000 , padding='max_length' , return_tensors='np' )
__A : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCAmelCase_ ( self ):
__A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__A : Tuple = feat_extract(
_A , truncation=_A , max_length=1000 , padding='longest' , return_tensors='np' )
__A : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
__A : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__A : str = feat_extract(
_A , truncation=_A , max_length=2000 , padding='longest' , return_tensors='np' )
__A : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def UpperCAmelCase_ ( self ):
__A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __A : List[str] = np.random.rand(100 ).astype(np.float32 )
__A : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__A : Optional[int] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
__A : List[str] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def UpperCAmelCase_ ( self ):
        # Test that all calls wrap to encode_plus and batch_encode_plus
__A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__A : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__A : List[str] = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
__A : Dict = feature_extractor(audio_target=_A , padding=_A , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
__A : Dict = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
__A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test batched
__A : Union[str, Any] = feature_extractor(_A , return_tensors='np' ).input_values
__A : Any = feature_extractor(_A , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__A : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__A : Union[str, Any] = np.asarray(_A )
__A : Optional[int] = feature_extractor(_A , return_tensors='np' ).input_values
__A : List[Any] = feature_extractor(_A , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
def UpperCAmelCase_ ( self ):
__A : Dict = self.feat_extract_tester.prepare_inputs_for_target()
__A : str = self.feature_extraction_class(**self.feat_extract_dict )
__A : Any = feat_extract.model_input_names[0]
__A : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )
__A : int = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
__A : int = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
__A : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__A : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCAmelCase_ ( self ):
__A : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
__A : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__A : Union[str, Any] = feat_extract.model_input_names[0]
__A : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
__A : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__A : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCAmelCase_ ( self ):
__A : int = self.feature_extraction_class(**self.feat_extract_dict )
__A : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
__A : int = feat_extract.model_input_names[0]
__A : Optional[Any] = BatchFeature({input_name: speech_inputs} )
__A : Union[str, Any] = feat_extract.num_mel_bins # hack!
__A : Dict = feat_extract.pad(_A , padding='longest' , return_tensors='np' )[input_name]
__A : str = feat_extract.pad(_A , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
def UpperCAmelCase_ ( self ):
__A : Dict = self.feat_extract_dict
__A : str = True
__A : Union[str, Any] = self.feature_extraction_class(**_A )
__A : str = self.feat_extract_tester.prepare_inputs_for_target()
__A : Optional[int] = [len(_A ) for x in speech_inputs]
__A : Any = feat_extract.model_input_names[0]
__A : int = BatchFeature({input_name: speech_inputs} )
__A : Union[str, Any] = feat_extract.num_mel_bins # hack!
__A : Dict = feat_extract.pad(_A , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _A )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.feat_extract_dict
__A : List[Any] = True
__A : Optional[int] = self.feature_extraction_class(**_A )
__A : int = self.feat_extract_tester.prepare_inputs_for_target()
__A : Union[str, Any] = [len(_A ) for x in speech_inputs]
__A : Union[str, Any] = feat_extract.model_input_names[0]
__A : str = BatchFeature({input_name: speech_inputs} )
__A : Dict = min(_A )
__A : List[Any] = feat_extract.num_mel_bins # hack!
__A : Optional[Any] = feat_extract.pad(
_A , padding='max_length' , max_length=_A , truncation=_A , return_tensors='np' )
self.assertIn('attention_mask' , _A )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def UpperCAmelCase_ ( self , _A ):
from datasets import load_dataset
__A : Optional[int] = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__A : Optional[Any] = ds.sort('id' ).select(range(_A ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCAmelCase_ ( self ):
# fmt: off
__A : Optional[Any] = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
__A : Any = self._load_datasamples(1 )
__A : Tuple = SpeechTaFeatureExtractor()
__A : str = feature_extractor(_A , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _A , atol=1e-6 ) )
def UpperCAmelCase_ ( self ):
# fmt: off
__A : Optional[int] = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
__A : List[str] = self._load_datasamples(1 )
__A : Optional[Any] = SpeechTaFeatureExtractor()
__A : Optional[int] = feature_extractor(audio_target=_A , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
| 280 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : List[str] = '''▁'''
UpperCAmelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = BertGenerationTokenizer
UpperCamelCase : str = False
UpperCamelCase : Tuple = True
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Tuple = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : str = '<s>'
__A : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCAmelCase_ ( self ):
__A : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(_A ) , 1002 )
def UpperCAmelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCAmelCase_ ( self ):
__A : str = BertGenerationTokenizer(_A , keep_accents=_A )
__A : Dict = tokenizer.tokenize('This is a test' )
self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
__A : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__A : Dict = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__A : Optional[int] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase_ ( self ):
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'Hello World!'
__A : Optional[Any] = [18536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCAmelCase_ ( self ):
__A : Dict = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
__A : int = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCAmelCase_ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__A : Tuple = list(self.big_tokenizer.get_vocab().keys() )[:10]
__A : List[Any] = ' '.join(_A )
__A : Union[str, Any] = self.big_tokenizer.encode_plus(_A , return_tensors='pt' , return_token_type_ids=_A )
__A : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=_A )
__A : int = BertGenerationConfig()
__A : List[str] = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCAmelCase_ ( self ):
# fmt: off
__A : str = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 280 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=snake_case__ )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase : ClassVar[Features] = Features({'''audio''': Audio()} )
UpperCamelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
UpperCamelCase : str = "audio"
UpperCamelCase : str = "labels"
def UpperCAmelCase_ ( self , _A ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _A ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
__A : Any = copy.deepcopy(self )
__A : Optional[int] = self.label_schema.copy()
__A : Any = features[self.label_column]
__A : str = label_schema
return task_template
@property
def UpperCAmelCase_ ( self ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
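# Usage sketch (hypothetical column names; a standalone illustration, not the
# template API): the class above remaps arbitrary dataset columns onto the
# canonical {"audio", "labels"} pair that downstream trainers rely on.
_features = Features({'audio': Audio(), 'genre': ClassLabel(names=['jazz', 'rock'])})
# the align step copies the ClassLabel of the configured label column into the
# template's label schema under the canonical name:
_label_schema = Features({'labels': _features['genre']})
assert isinstance(_label_schema['labels'], ClassLabel)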
| 280 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _A:
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *_A , **_A ):
pass
def _SCREAMING_SNAKE_CASE ( a ) -> str:
__A : str = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : Dict = np.array(a )
__A : List[Any] = npimg.shape
return {"hash": hashimage(a ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase : int = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Dict = MaskGenerationPipeline(model=_A , image_processor=_A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase_ ( self , _A , _A ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCAmelCase_ ( self ):
pass
@slow
@require_torch
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
__A : List[str] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
__A : List[Any] = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = 'facebook/sam-vit-huge'
__A : List[str] = pipeline('mask-generation' , model=_A )
__A : Tuple = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__A : List[str] = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
| 280 | 1 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : int = 'hf-internal-testing/tiny-random-t5'
__A : List[str] = AutoTokenizer.from_pretrained(_A )
__A : Dict = AutoModelForSeqaSeqLM.from_pretrained(_A )
__A : str = tokenizer('This is me' , return_tensors='pt' )
__A : str = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__A : Any = model.generate(**_A )
__A : Any = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
__A : Dict = AutoModelForSeqaSeqLM.from_pretrained(_A )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__A : Optional[Any] = model_reloaded.generate(**_A )
self.assertTrue(torch.allclose(_A , _A ) )
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'hf-internal-testing/tiny-random-t5'
__A : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(_A )
__A : Tuple = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_A ):
model.save_pretrained(_A )
__A : List[Any] = model.reverse_bettertransformer()
model.save_pretrained(_A )
| 280 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : List[Any] = tempfile.mkdtemp()
# fmt: off
__A : List[str] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Union[str, Any] = dict(zip(_A , range(len(_A ) ) ) )
__A : Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : int = {'unk_token': '<unk>'}
__A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : List[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : Optional[int] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.get_tokenizer()
__A : str = self.get_rust_tokenizer()
__A : List[str] = self.get_image_processor()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : Optional[int] = self.get_image_processor(do_normalize=_A )
__A : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = self.prepare_image_inputs()
__A : int = image_processor(_A , return_tensors='np' )
__A : str = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : str = self.get_image_processor()
__A : str = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : str = 'lower newer'
__A : str = processor(text=_A , return_tensors='np' )
__A : List[str] = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : int = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : List[str] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = 'lower newer'
__A : Optional[Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Any = 'google/owlvit-base-patch32'
__A : int = OwlViTProcessor.from_pretrained(_A )
__A : Dict = ['cat', 'nasa badge']
__A : Optional[Any] = processor(text=_A )
__A : Optional[int] = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : Dict = [['cat', 'nasa badge'], ['person']]
__A : Dict = processor(text=_A )
__A : Optional[int] = 16
__A : Any = len(_A )
__A : Union[str, Any] = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Union[str, Any] = ['cat', 'nasa badge']
__A : Tuple = processor(text=_A )
__A : str = 16
__A : int = inputs['input_ids']
__A : List[Any] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : str = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Tuple = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
| 280 | 1 |
class _A:
"""simple docstring"""
def __init__( self , _A ):
__A : Tuple = set_counts
__A : Union[str, Any] = max(_A )
__A : List[str] = len(_A )
__A : Union[str, Any] = [1] * num_sets
__A : int = list(range(_A ) )
def UpperCAmelCase_ ( self , _A , _A ):
__A : Tuple = self.get_parent(_A )
__A : Dict = self.get_parent(_A )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__A : Optional[Any] = 0
__A : int = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__A : List[str] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__A : Any = 0
__A : Optional[int] = src_parent
__A : Optional[Any] = self.set_counts[src_parent]
__A : List[str] = max(self.max_set , _A )
return True
def UpperCAmelCase_ ( self , _A ):
if self.parents[disj_set] == disj_set:
return disj_set
__A : Union[str, Any] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
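# Equivalent behaviour with readable names (a standalone sketch of the same
# union-by-rank structure that also tracks set sizes and the largest set seen;
# this version uses an iterative find without path compression):
class _DisjointSetSketch:
    def __init__(self, set_counts):
        self.set_counts = list(set_counts)
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def find(self, x):
        while self.parents[x] != x:
            x = self.parents[x]
        return x

    def merge(self, src, dst):
        src, dst = self.find(src), self.find(dst)
        if src == dst:
            return False
        if self.ranks[src] > self.ranks[dst]:
            src, dst = dst, src                 # attach the lower rank under the higher
        self.set_counts[dst] += self.set_counts[src]
        self.set_counts[src] = 0
        self.parents[src] = dst
        if self.ranks[src] == self.ranks[dst]:
            self.ranks[dst] += 1
        self.max_set = max(self.max_set, self.set_counts[dst])
        return True

_ds = _DisjointSetSketch([1, 1, 1])
_ds.merge(0, 1)
_ds.merge(1, 2)
assert _ds.max_set == 3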
| 280 |
import math
def _SCREAMING_SNAKE_CASE ( a ) -> list[int]:
__A : List[str] = []
__A : Any = 2
__A : Union[str, Any] = int(math.sqrt(a ) ) # Size of every segment
__A : Any = [True] * (end + 1)
__A : List[Any] = []
while start <= end:
if temp[start] is True:
in_prime.append(a )
for i in range(start * start , end + 1 , a ):
__A : Optional[int] = False
start += 1
prime += in_prime
__A : Any = end + 1
__A : Any = min(2 * end , a )
while low <= n:
__A : List[Any] = [True] * (high - low + 1)
for each in in_prime:
__A : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(a , high + 1 , a ):
__A : Optional[int] = False
for j in range(len(a ) ):
if temp[j] is True:
prime.append(j + low )
__A : Optional[int] = high + 1
__A : Tuple = min(high + end , a )
return prime
print(sieve(10**6))
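# Sanity check sketch (assuming `sieve` names the segmented sieve defined
# above, as the print call already does): the segmented result must match a
# plain sieve of Eratosthenes on small inputs.
def _plain_sieve(n: int) -> list:
    flags = [True] * (n + 1)
    out = []
    for p in range(2, n + 1):
        if flags[p]:
            out.append(p)
            for q in range(p * p, n + 1, p):
                flags[q] = False
    return out

assert _plain_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]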
| 280 | 1 |
from torch import nn
def _SCREAMING_SNAKE_CASE ( a ) -> List[Any]:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase : Any = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
import argparse
import json
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=a , help='where to store parsed gold_data_path file' , )
__A : Optional[int] = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
__A : List[Any] = json.load(a )
for dpr_record in tqdm(a ):
__A : Dict = dpr_record['question']
__A : Any = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(a ) + '\n' )
if __name__ == "__main__":
main()
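# Record-shape sketch (an assumption based only on the fields read above, not
# the full DPR schema): each entry carries a question plus positive contexts
# whose titles become one tab-separated line in the gold file.
_demo_record = {'question': 'who wrote hamlet', 'positive_ctxs': [{'title': 'Hamlet'}, {'title': 'William Shakespeare'}]}
assert '\t'.join(c['title'] for c in _demo_record['positive_ctxs']) == 'Hamlet\tWilliam Shakespeare'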
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple:
__A , __A : Optional[Any] = [], []
while len(a ) > 1:
__A , __A : Any = min(a ), max(a )
start.append(a )
end.append(a )
collection.remove(a )
collection.remove(a )
end.reverse()
return start + collection + end
if __name__ == "__main__":
UpperCAmelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase : Dict = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
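# Standalone reconstruction with readable names (the "merge_sort" above is
# really a min/max squeeze: peel the minimum to the front and the maximum to
# the back until at most one element is left in the middle):
def _squeeze_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        low, high = min(collection), max(collection)
        start.append(low)
        end.append(high)
        collection.remove(low)
        collection.remove(high)
    end.reverse()
    return start + collection + end

assert _squeeze_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]
assert _squeeze_sort([]) == []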
| 280 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> None:
__A : int = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(a ) == len(a ), F"""{len(a )} != {len(a )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
UpperCAmelCase : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
UpperCAmelCase : Optional[int] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _SCREAMING_SNAKE_CASE ( a , a ) -> Dict:
try:
__A : int = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(a ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__A : List[str] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a , a ):
AutoTokenizer.from_pretrained(a ).save_pretrained(a ) # purely for convenience
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a ).eval()
else:
assert isinstance(a , a ), F"""teacher must be a model or string got type {type(a )}"""
__A : int = teacher.config.to_diff_dict()
try:
__A , __A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__A : str = teacher_e
if d is None:
__A : List[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__A , __A : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__A , __A : Optional[int] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__A : int = teacher_e
if d is None:
__A : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a )
# Copy weights
__A : Dict = teacher.config_class(**a )
__A : int = AutoModelForSeqaSeqLM.from_config(a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__A : Any = student.load_state_dict(teacher.state_dict() , strict=a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__A , __A : Optional[int] = list(range(a ) ), list(range(a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
if d_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
try:
if hasattr(
a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a )
copy_layers(teacher.decoder.block , student.decoder.block , a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__A : Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
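# Invocation sketch (hypothetical file name; model and directory names are
# illustrative):
#   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3
# keeps all 12 encoder layers and, per LAYERS_TO_COPY[12][3], decoder layers
# [0, 6, 11]. For pairs missing from the table the module warns and falls back
# to the first n_student layers; an evenly spaced pick (an alternative shown
# here only as a sketch) reproduces the tabled 12 -> 3 row:
def _spread_layers(n_teacher: int, n_student: int) -> list:
    step = (n_teacher - 1) / max(n_student - 1, 1)
    return sorted({round(i * step) for i in range(n_student)})

assert _spread_layers(12, 3) == [0, 6, 11]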
| 280 |
def _SCREAMING_SNAKE_CASE ( a , a = 0 ) -> list:
__A : int = length or len(a )
__A : str = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
__A , __A : Optional[int] = list_data[i + 1], list_data[i]
__A : Union[str, Any] = True
return list_data if not swapped else bubble_sort(a , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
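# Standalone reconstruction with readable names: one pass bubbles the maximum
# to the end, recursion then shrinks the examined prefix by one, and a pass
# with no swaps short-circuits early.
def _bubble_sort(data: list, length: int = 0) -> list:
    length = length or len(data)
    swapped = False
    for i in range(length - 1):
        if data[i] > data[i + 1]:
            data[i], data[i + 1] = data[i + 1], data[i]
            swapped = True
    return data if not swapped else _bubble_sort(data, length - 1)

assert _bubble_sort([3, 1, 2]) == [1, 2, 3]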
| 280 | 1 |
import colorsys
from PIL import Image # type: ignore
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
__A : List[str] = x
__A : str = y
for step in range(a ): # noqa: B007
__A : Union[str, Any] = a * a - b * b + x
__A : Optional[int] = 2 * a * b + y
__A : List[str] = a_new
# divergence happens for all complex numbers with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(a , 1 , 1 ) )
def _SCREAMING_SNAKE_CASE ( a = 8_00 , a = 6_00 , a = -0.6 , a = 0 , a = 3.2 , a = 50 , a = True , ) -> Image.Image:
__A : str = Image.new('RGB' , (image_width, image_height) )
__A : Dict = img.load()
# loop through the image-coordinates
for image_x in range(a ):
for image_y in range(a ):
# determine the figure-coordinates based on the image-coordinates
__A : Dict = figure_width / image_width * image_height
__A : Union[str, Any] = figure_center_x + (image_x / image_width - 0.5) * figure_width
__A : Optional[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
__A : Union[str, Any] = get_distance(a , a , a )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__A : Optional[Any] = get_color_coded_rgb(a )
else:
__A : Dict = get_black_and_white_rgb(a )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
UpperCAmelCase : str = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
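# Escape-time sketch with readable names (the distance helper above returns
# step / (max_step - 1): 1.0 for points whose orbit never escapes |z| > 2,
# smaller values the faster the orbit diverges):
def _escape_fraction(x: float, y: float, max_step: int) -> float:
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y   # simultaneous update, as above
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

assert _escape_fraction(0.0, 0.0, 50) == 1.0   # the origin is in the set
assert _escape_fraction(2.0, 2.0, 50) == 0.0   # escapes on the first step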
| 280 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( a ) -> int:
if not nums:
return 0
__A : Optional[int] = nums[0]
__A : str = 0
for num in nums[1:]:
__A , __A : Tuple = (
max_excluding + num,
max(a , a ),
)
return max(a , a )
if __name__ == "__main__":
import doctest
doctest.testmod()
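# Worked example with readable names (the pair tracked above is the classic
# include/exclude dynamic program over non-adjacent elements):
def _max_non_adjacent_sum(nums: list) -> int:
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)

assert _max_non_adjacent_sum([1, 2, 3]) == 4          # picks 1 and 3
assert _max_non_adjacent_sum([2, 7, 9, 3, 1]) == 12   # picks 2, 9 and 1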
| 280 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=30 , _A=2 , _A=3 , _A=True , _A=True , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=10 , _A=0.0_2 , _A=None , ):
__A : str = parent
__A : int = batch_size
__A : Any = image_size
__A : Any = patch_size
__A : Optional[Any] = num_channels
__A : Union[str, Any] = is_training
__A : int = use_labels
__A : Any = hidden_size
__A : Tuple = num_hidden_layers
__A : Any = num_attention_heads
__A : List[Any] = intermediate_size
__A : int = hidden_act
__A : Any = hidden_dropout_prob
__A : List[str] = attention_probs_dropout_prob
__A : Optional[Any] = type_sequence_label_size
__A : Optional[Any] = initializer_range
__A : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__A : List[Any] = (image_size // patch_size) ** 2
__A : Optional[Any] = num_patches + 1
def UpperCAmelCase_ ( self ):
__A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : List[str] = None
if self.use_labels:
__A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Optional[int] = ViTMSNModel(config=_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : str = self.type_sequence_label_size
__A : Optional[Any] = ViTMSNForImageClassification(_A )
model.to(_A )
model.eval()
__A : Dict = model(_A , labels=_A )
print(F"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print(F"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A : str = 1
__A : Any = ViTMSNForImageClassification(_A )
model.to(_A )
model.eval()
__A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A : int = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ):
__A : int = self.prepare_config_and_inputs()
__A , __A , __A : str = config_and_inputs
__A : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase : str = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Tuple = False
UpperCamelCase : str = False
UpperCamelCase : List[str] = False
def UpperCAmelCase_ ( self ):
__A : Dict = ViTMSNModelTester(self )
__A : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
__A , __A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def UpperCAmelCase_ ( self ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(_A )
__A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Any = [*signature.parameters.keys()]
__A : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def UpperCAmelCase_ ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : str = ViTMSNModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _SCREAMING_SNAKE_CASE ( ) -> Any:
__A : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ):
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
torch.manual_seed(2 )
__A : Union[str, Any] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_A )
__A : Dict = self.default_image_processor
__A : Tuple = prepare_img()
__A : List[Any] = image_processor(images=_A , return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
__A : Union[str, Any] = model(**_A )
# verify the logits
__A : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
__A : str = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : Optional[int] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
def _SCREAMING_SNAKE_CASE ( a ) -> list:
__A : List[str] = False
while is_sorted is False: # Until all the indices are traversed keep looping
__A : Dict = True
for i in range(0 , len(a ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__A , __A : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
__A : List[str] = False
for i in range(1 , len(a ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__A , __A : List[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
__A : int = False
return input_list
if __name__ == "__main__":
print('''Enter list to be sorted''')
UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCAmelCase : Dict = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
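# Standalone reconstruction with readable names: odd-even (brick) sort
# alternates compare-swap passes over even and odd index pairs until a full
# double pass makes no swap; each pass touches only independent pairs, which
# is what makes the scheme parallel-friendly.
def _odd_even_sort(data: list) -> list:
    is_sorted = False
    while not is_sorted:
        is_sorted = True
        for offset in (0, 1):
            for i in range(offset, len(data) - 1, 2):
                if data[i] > data[i + 1]:
                    data[i], data[i + 1] = data[i + 1], data[i]
                    is_sorted = False
    return data

assert _odd_even_sort([4, 3, 2, 1]) == [1, 2, 3, 4]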
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> str:
if number > 0:
raise ValueError('input must be a negative integer' )
__A : Optional[int] = len(bin(a )[3:] )
__A : Dict = bin(abs(a ) - (1 << binary_number_length) )[3:]
__A : int = (
(
'1'
+ '0' * (binary_number_length - len(a ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
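# Worked example with readable names (same scheme as above; negative inputs
# only, mirroring the ValueError guard): -5 needs three magnitude bits, so it
# is encoded in four bits as 1011.
def _twos_complement(number: int) -> str:
    width = len(bin(abs(number))) - 2          # magnitude bits of |number|
    return '0b' + format((1 << (width + 1)) + number, 'b')

assert _twos_complement(-5) == '0b1011'
assert _twos_complement(-1) == '0b11'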
| 280 | 1 |
from __future__ import annotations
class _A:
"""simple docstring"""
def __init__( self , _A , _A ):
__A , __A : Optional[int] = text, pattern
__A , __A : Tuple = len(_A ), len(_A )
def UpperCAmelCase_ ( self , _A ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def UpperCAmelCase_ ( self , _A ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def UpperCAmelCase_ ( self ):
# searches pattern in text and returns index positions
__A : Dict = []
for i in range(self.textLen - self.patLen + 1 ):
__A : Tuple = self.mismatch_in_text(_A )
if mismatch_index == -1:
positions.append(_A )
else:
__A : Dict = self.match_in_pattern(self.text[mismatch_index] )
__A : List[Any] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
UpperCAmelCase : Optional[int] = '''ABAABA'''
UpperCAmelCase : List[Any] = '''AB'''
UpperCAmelCase : Optional[int] = BoyerMooreSearch(text, pattern)
UpperCAmelCase : str = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
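# Table view of the bad-character rule (a sketch): precompute each pattern
# character's last index; on a mismatch against text character c, shift so the
# last occurrence of c lines up with it, or jump past the pattern if c never
# occurs, which is what the linear scan in match_in_pattern computes above.
def _last_occurrence(pattern: str) -> dict:
    return {ch: i for i, ch in enumerate(pattern)}

assert _last_occurrence('AB') == {'A': 0, 'B': 1}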
| 280 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> None:
__A : int = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(a ) == len(a ), F"""{len(a )} != {len(a )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
UpperCAmelCase : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
UpperCAmelCase : Optional[int] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _SCREAMING_SNAKE_CASE ( a , a ) -> Dict:
try:
__A : int = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(a ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__A : List[str] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a , a ):
AutoTokenizer.from_pretrained(a ).save_pretrained(a ) # purely for convenience
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a ).eval()
else:
assert isinstance(a , a ), F"""teacher must be a model or string got type {type(a )}"""
__A : int = teacher.config.to_diff_dict()
try:
__A , __A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__A : str = teacher_e
if d is None:
__A : List[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__A , __A : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__A , __A : Optional[int] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__A : int = teacher_e
if d is None:
__A : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a )
# Copy weights
__A : Dict = teacher.config_class(**a )
__A : int = AutoModelForSeqaSeqLM.from_config(a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__A : Any = student.load_state_dict(teacher.state_dict() , strict=a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__A , __A : Optional[int] = list(range(a ) ), list(range(a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
if d_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
try:
if hasattr(
a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a )
copy_layers(teacher.decoder.block , student.decoder.block , a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__A : Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 280 | 1 |
UpperCAmelCase : Any = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
UpperCAmelCase : Tuple = {value: key for key, value in encode_dict.items()}
def _SCREAMING_SNAKE_CASE ( a ) -> str:
__A : Union[str, Any] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def _SCREAMING_SNAKE_CASE ( a ) -> str:
if set(a ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
__A : Any = ''
for word in coded.split():
while len(a ) != 0:
decoded += decode_dict[word[:5]]
__A : Optional[int] = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
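# Round-trip sketch on a two-letter excerpt of the table above: every letter
# becomes a fixed 5-symbol A/B code, and decoding walks the string in
# 5-character chunks.
_excerpt = {'a': 'AAAAA', 'b': 'AAAAB'}
_encoded = ''.join(_excerpt[c] for c in 'ab')
assert _encoded == 'AAAAAAAAAB'
assert [_encoded[i:i + 5] for i in range(0, len(_encoded), 5)] == ['AAAAA', 'AAAAB']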
| 280 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> list[int]:
__A : Optional[int] = int(a )
# Initialize Result
__A : Optional[int] = []
# Traverse through all denomination
for denomination in reversed(a ):
# Find denominations
while int(a ) >= int(a ):
total_value -= int(a )
answer.append(a ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
UpperCAmelCase : List[str] = []
UpperCAmelCase : Optional[int] = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
UpperCAmelCase : List[Any] = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
UpperCAmelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase : Optional[int] = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCAmelCase : Tuple = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
UpperCAmelCase : Optional[int] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
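# Greedy walk-through with readable names (same scheme as above; greedy is
# optimal for this canonical denomination set, though not for arbitrary ones):
def _find_minimum_change(denominations: list, value: int) -> list:
    picked = []
    for d in sorted(denominations, reverse=True):
        while value >= d:
            value -= d
            picked.append(d)
    return picked

assert _find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2]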
| 280 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[int]:
for param in module.parameters():
__A : int = False
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
__A : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__A : Optional[Any] = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def _SCREAMING_SNAKE_CASE ( a ) -> Any:
__A : Union[str, Any] = plt.imshow(a )
fig.axes.get_xaxis().set_visible(a )
fig.axes.get_yaxis().set_visible(a )
plt.show()
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
__A : str = datetime.now()
__A : List[str] = current_time.strftime('%H:%M:%S' )
return timestamp
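# Usage sketch for the freeze helper above (standalone; `nn` ships with torch,
# which this module already imports):
from torch import nn

_layer = nn.Linear(4, 2)
for _param in _layer.parameters():
    _param.requires_grad = False          # exactly what the loop above assigns
assert not any(p.requires_grad for p in _layer.parameters())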
| 280 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : List[Any] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__A : Union[str, Any] = parent
__A : Optional[int] = batch_size
__A : int = num_channels
__A : int = min_resolution
__A : Any = max_resolution
__A : List[Any] = do_resize
__A : List[Any] = size
__A : Union[str, Any] = do_normalize
__A : Optional[int] = image_mean
__A : Optional[int] = image_std
__A : int = do_rescale
__A : str = rescale_factor
__A : Tuple = do_pad
def UpperCAmelCase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self , _A , _A=False ):
if not batched:
__A : List[str] = image_inputs[0]
if isinstance(_A , Image.Image ):
__A , __A : int = image.size
else:
__A , __A : Any = image.shape[1], image.shape[2]
if w < h:
__A : List[Any] = int(self.size['shortest_edge'] * h / w )
__A : List[Any] = self.size['shortest_edge']
elif w > h:
__A : Union[str, Any] = self.size['shortest_edge']
__A : str = int(self.size['shortest_edge'] * w / h )
else:
__A : Dict = self.size['shortest_edge']
__A : str = self.size['shortest_edge']
else:
__A : int = []
for image in image_inputs:
__A , __A : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : List[str] = max(_A , key=lambda _A : item[0] )[0]
__A : str = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : Dict = YolosImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _A )
__A : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _A )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
__A : str = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[int] = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image processors
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
__A : Any = self.image_processing_class(do_resize=_A , do_normalize=_A , do_rescale=_A )
# create random PyTorch tensors
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test whether calling the "pad" method and calling the image processor directly return the same tensors
__A : Optional[int] = image_processing_a.pad(_A , return_tensors='pt' )
__A : Optional[int] = image_processing_a(_A , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image and target
__A : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__A : Optional[Any] = json.loads(f.read() )
__A : Optional[Any] = {'image_id': 39769, 'annotations': target}
# encode them
__A : str = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
__A : List[Any] = image_processing(images=_A , annotations=_A , return_tensors='pt' )
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify orig_size
__A : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image, target and masks_path
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__A : Tuple = json.loads(f.read() )
__A : Any = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
__A : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__A : Any = YolosImageProcessor(format='coco_panoptic' )
__A : List[Any] = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='pt' )
# verify pixel values
__A : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify masks
__A : Tuple = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _A )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
| 280 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _SCREAMING_SNAKE_CASE ( a ) -> List[str]:
# vision encoder
if "img_encoder.pos_embed" in name:
__A : List[str] = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
__A : str = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
__A : Optional[int] = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
__A : Any = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
__A : List[str] = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
__A : Dict = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
__A : int = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
__A : List[Any] = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
__A : List[Any] = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
__A : List[str] = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
__A : int = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
__A : Optional[Any] = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
__A : Optional[int] = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
__A : List[str] = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
__A : Optional[Any] = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
__A : Any = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
__A : Dict = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
__A : Dict = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
__A : Union[str, Any] = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
__A : Tuple = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
__A : Dict = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
__A : Optional[Any] = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
__A : List[str] = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
__A : List[Any] = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
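# A quick illustration of the renaming above (example key chosen for this sketch):
# rename_key("img_encoder.layers.0.blocks.1.norm1.weight")
# -> "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"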
def _SCREAMING_SNAKE_CASE ( a , a ) -> Tuple:
for key in orig_state_dict.copy().keys():
__A : int = orig_state_dict.pop(a )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__A : Tuple = key.split('.' )
__A , __A : Union[str, Any] = int(key_split[2] ), int(key_split[4] )
__A : List[Any] = config.vision_config.hidden_size
if "weight" in key:
__A : Union[str, Any] = val[:dim, :]
__A : Any = val[dim : dim * 2, :]
__A : List[str] = val[-dim:, :]
else:
__A : Optional[Any] = val[:dim]
__A : Dict = val[dim : dim * 2]
__A : Union[str, Any] = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
__A : int = key.split('.' )
__A : Optional[int] = int(key_split[3] )
__A : int = config.text_config.hidden_size
if "weight" in key:
__A : int = val[:dim, :]
__A : Tuple = val[
dim : dim * 2, :
]
__A : Optional[int] = val[-dim:, :]
else:
__A : Union[str, Any] = val[:dim]
__A : Optional[int] = val[dim : dim * 2]
__A : Any = val[-dim:]
else:
__A : Optional[int] = rename_key(a )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
__A : Optional[Any] = val.squeeze_()
else:
__A : Optional[int] = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
__A : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__A : Union[str, Any] = Image.open(requests.get(a , stream=a ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( a , a , a="groupvit-gcc-yfcc" , a=False ) -> Optional[int]:
__A : Dict = GroupViTConfig()
__A : Any = GroupViTModel(a ).eval()
__A : str = torch.load(a , map_location='cpu' )['model']
__A : str = convert_state_dict(a , a )
__A , __A : int = model.load_state_dict(a , strict=a )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(a ) == 0)
# verify result
__A : Any = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
__A : Union[str, Any] = prepare_img()
__A : str = processor(text=['a photo of a cat', 'a photo of a dog'] , images=a , padding=a , return_tensors='pt' )
with torch.no_grad():
__A : List[Any] = model(**a )
if model_name == "groupvit-gcc-yfcc":
__A : str = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
__A : Dict = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , a , atol=1e-3 )
processor.save_pretrained(a )
model.save_pretrained(a )
print('Successfully saved processor and model to' , a )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(a , organization='nielsr' )
model.push_to_hub(a , organization='nielsr' )
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 280 |
import argparse
import json
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=a , help='where to store parsed gold_data_path file' , )
__A : Optional[int] = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
__A : List[Any] = json.load(a )
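# Each DPR record is assumed to look roughly like this (simplified sketch):
# {"question": "...", "positive_ctxs": [{"title": "...", "text": "..."}, ...], ...}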
for dpr_record in tqdm(a ):
__A : Dict = dpr_record['question']
__A : Any = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(a ) + '\n' )
if __name__ == "__main__":
main()
| 280 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCAmelCase : List[Any] = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCAmelCase : Any = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _SCREAMING_SNAKE_CASE ( a , a=1_00 , a=" " ) -> List[str]:
__A : Any = text.split(a )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(a ) , a )]
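# Worked example of the chunking above (n words per passage, joined by `character`):
# split_text("a b c d e", n=2, character=" ") -> ["a b", "c d", "e"]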
def _SCREAMING_SNAKE_CASE ( a ) -> dict:
__A , __A : List[Any] = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(a ):
titles.append(title if title is not None else '' )
texts.append(a )
return {"title": titles, "text": texts}
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> dict:
__A : Any = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=a , padding='longest' , return_tensors='pt' )['input_ids']
__A : Optional[Any] = ctx_encoder(input_ids.to(device=a ) , return_dict=a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _SCREAMING_SNAKE_CASE ( a , a , a , ) -> Dict:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__A : Union[str, Any] = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__A : Tuple = dataset.map(a , batched=a , num_proc=processing_args.num_proc )
# And compute the embeddings
__A : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=a )
__A : Tuple = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__A : Union[str, Any] = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
__A : List[Any] = dataset.map(
partial(a , ctx_encoder=a , ctx_tokenizer=a ) , batched=a , batch_size=processing_args.batch_size , features=a , )
# And finally save your dataset
__A : Optional[Any] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__A : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=a )
# And save the index
__A : Tuple = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class _A:
"""simple docstring"""
UpperCamelCase : str = field(
default=str(Path(snake_case__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
UpperCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
UpperCamelCase : str = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
UpperCamelCase : str = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
UpperCamelCase : Optional[str] = field(
default=str(Path(snake_case__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class _A:
"""simple docstring"""
UpperCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
UpperCamelCase : int = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class _A:
"""simple docstring"""
UpperCamelCase : int = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
UpperCamelCase : int = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCAmelCase : str = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCAmelCase : int = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 280 |
from heapq import heappop, heappush
import numpy as np
def _SCREAMING_SNAKE_CASE ( a , a , a , a , ) -> tuple[float | int, list[tuple[int, int]]]:
__A , __A : int = grid.shape
__A : Any = [-1, 1, 0, 0]
__A : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__A , __A : Optional[int] = [(0, source)], set()
__A : Any = np.full((rows, cols) , np.inf )
__A : Any = 0
__A : Any = np.empty((rows, cols) , dtype=a )
__A : Optional[Any] = None
while queue:
((__A) , (__A)) : List[str] = heappop(a )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__A : int = []
while (x, y) != source:
path.append((x, y) )
__A , __A : Optional[int] = predecessors[x, y]
path.append(a ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(a ) ):
__A , __A : Union[str, Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__A : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(a , (dist + 1, (nx, ny)) )
__A : List[Any] = dist + 1
__A : Union[str, Any] = (x, y)
return np.inf, []
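# A minimal usage sketch (hypothetical 3x3 grid; 1 marks a walkable cell; the
# function name below stands in for the de-obfuscated grid-Dijkstra above):
# grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
# dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
# dist == 4.0 and path == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]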
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 1 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _A:
"""simple docstring"""
def __init__( self , _A = None ):
if components is None:
__A : int = []
__A : Tuple = list(_A )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(_A , self.__components ) ) + ")"
def __add__( self , _A ):
__A : Optional[int] = len(self )
if size == len(_A ):
__A : Any = [self.__components[i] + other.component(_A ) for i in range(_A )]
return Vector(_A )
else:
raise Exception('must have the same size' )
def __sub__( self , _A ):
__A : Tuple = len(self )
if size == len(_A ):
__A : Union[str, Any] = [self.__components[i] - other.component(_A ) for i in range(_A )]
return Vector(_A )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , (float, int) ):
__A : str = [c * other for c in self.__components]
return Vector(_A )
elif isinstance(_A , _A ) and len(self ) == len(_A ):
__A : Union[str, Any] = len(self )
__A : Dict = [self.__components[i] * other.component(_A ) for i in range(_A )]
return sum(_A )
else: # error case
raise Exception('invalid operand!' )
def UpperCAmelCase_ ( self ):
return Vector(self.__components )
def UpperCAmelCase_ ( self , _A ):
if isinstance(_A , _A ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCAmelCase_ ( self , _A , _A ):
assert -len(self.__components ) <= pos < len(self.__components )
__A : Optional[int] = value
def UpperCAmelCase_ ( self ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__A : Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(_A ) )
def UpperCAmelCase_ ( self , _A , _A = False ):
__A : Optional[Any] = self * other
__A : Optional[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _SCREAMING_SNAKE_CASE ( a ) -> Vector:
assert isinstance(a , a )
return Vector([0] * dimension )
def _SCREAMING_SNAKE_CASE ( a , a ) -> Vector:
assert isinstance(a , a ) and (isinstance(a , a ))
__A : Optional[Any] = [0] * dimension
__A : Tuple = 1
return Vector(a )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
assert (
isinstance(a , a )
and isinstance(a , a )
and (isinstance(a , (int, float) ))
)
return x * scalar + y
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
random.seed(a )
__A : str = [random.randint(a , a ) for _ in range(a )]
return Vector(a )
class _A:
"""simple docstring"""
def __init__( self , _A , _A , _A ):
__A : Optional[Any] = matrix
__A : Dict = w
__A : Optional[int] = h
def __str__( self ):
__A : Tuple = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Optional[Any] = []
for i in range(self.__height ):
__A : Optional[Any] = [
self.__matrix[i][j] + other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Tuple = []
for i in range(self.__height ):
__A : str = [
self.__matrix[i][j] - other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , _A ): # matrix-vector
if len(_A ) == self.__width:
__A : List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
__A : List[str] = [
self.__matrix[i][j] * other.component(_A )
for j in range(self.__width )
]
ans.change_component(_A , sum(_A ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_A , (int, float) ): # matrix-scalar
__A : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_A , self.__width , self.__height )
return None
def UpperCAmelCase_ ( self ):
return self.__height
def UpperCAmelCase_ ( self ):
return self.__width
def UpperCAmelCase_ ( self , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
__A : int = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__A : List[str] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_A ) ):
__A : Optional[int] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_A , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_A , _A )
else:
raise Exception('Indices out of bounds' )
def UpperCAmelCase_ ( self ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__A : List[str] = [
self.__matrix[0][y] * self.cofactor(0 , _A ) for y in range(self.__width )
]
return sum(_A )
def _SCREAMING_SNAKE_CASE ( a ) -> Matrix:
__A : list[list[float]] = [[0] * n for _ in range(a )]
return Matrix(a , a , a )
def _SCREAMING_SNAKE_CASE ( a , a , a , a ) -> Matrix:
random.seed(a )
__A : list[list[float]] = [
[random.randint(a , a ) for _ in range(a )] for _ in range(a )
]
return Matrix(a , a , a )
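# A short usage sketch of the classes above (values chosen for illustration):
# v = Vector([1.0, 2.0, 3.0]); w = Vector([4.0, 5.0, 6.0])
# v * w == 32.0 (dot product) and str(v + w) == "(5.0,7.0,9.0)"
# m = Matrix([[1, 2], [3, 4]], 2, 2); m.determinant() == -2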
| 280 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def _SCREAMING_SNAKE_CASE ( a , a , a=8 ) -> Tuple:
__A : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__A : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
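# Worked example (assuming the default movq scale_factor of 8, i.e. 64x downscaling):
# (512, 512) -> (64, 64); a non-multiple rounds up, e.g. 500 // 64 == 7 with a
# remainder, so the result is 8 * 8 == 64 as well.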
def _SCREAMING_SNAKE_CASE ( a , a=5_12 , a=5_12 ) -> int:
__A : Optional[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
__A : Union[str, Any] = np.array(pil_image.convert('RGB' ) )
__A : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
__A : int = np.transpose(a , [2, 0, 1] )
__A : Tuple = torch.from_numpy(a ).unsqueeze(0 )
return image
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A , _A , ):
super().__init__()
self.register_modules(
unet=_A , scheduler=_A , movq=_A , )
__A : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self , _A , _A , _A ):
# get the original timestep using init_timestep
__A : Optional[int] = min(int(num_inference_steps * strength ) , _A )
__A : Dict = max(num_inference_steps - init_timestep , 0 )
__A : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
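# Worked example: num_inference_steps=100 and strength=0.2 give init_timestep=20
# and t_start=80, so only the last 20 scheduler timesteps are actually run.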
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A=None ):
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}""" )
__A : Union[str, Any] = image.to(device=_A , dtype=_A )
__A : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__A : int = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(_A , _A ):
__A : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
__A : str = torch.cat(_A , dim=0 )
else:
__A : List[str] = self.movq.encode(_A ).latent_dist.sample(_A )
__A : Tuple = self.movq.config.scaling_factor * init_latents
__A : Optional[int] = torch.cat([init_latents] , dim=0 )
__A : Union[str, Any] = init_latents.shape
__A : List[str] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
__A : Optional[Any] = self.scheduler.add_noise(_A , _A , _A )
__A : Optional[int] = init_latents
return latents
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__A : Optional[int] = torch.device(F"""cuda:{gpu_id}""" )
__A : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__A : List[Any] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__A : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
__A , __A : Optional[int] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
__A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
def __call__( self , _A , _A , _A , _A = 512 , _A = 512 , _A = 100 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ):
__A : List[Any] = self._execution_device
__A : Optional[Any] = guidance_scale > 1.0
if isinstance(_A , _A ):
__A : Optional[Any] = torch.cat(_A , dim=0 )
__A : Tuple = image_embeds.shape[0]
if isinstance(_A , _A ):
__A : List[Any] = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
__A : Union[str, Any] = image_embeds.repeat_interleave(_A , dim=0 )
__A : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
__A : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
__A : List[Any] = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
__A : Dict = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
__A : Any = image.to(dtype=image_embeds.dtype , device=_A )
__A : Tuple = self.movq.encode(_A )['latents']
__A : int = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
__A , __A : int = self.get_timesteps(_A , _A , _A )
__A : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
__A , __A : Any = downscale_height_and_width(_A , _A , self.movq_scale_factor )
__A : Tuple = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__A : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A : Dict = {'image_embeds': image_embeds}
__A : List[str] = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
__A , __A : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__A , __A : Optional[Any] = noise_pred.chunk(2 )
__A , __A : List[str] = variance_pred.chunk(2 )
__A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__A : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__A , __A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__A : List[str] = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
__A : List[Any] = self.movq.decode(_A , force_not_quantize=_A )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__A : List[str] = image * 0.5 + 0.5
__A : List[str] = image.clamp(0 , 1 )
__A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__A : Any = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 280 | 1 |
from collections import namedtuple
UpperCAmelCase : int = namedtuple('''from_to''', '''from_ to''')
UpperCAmelCase : List[str] = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00454, 264.172),
'''cubicyard''': from_to(0.76455, 1.30795),
'''cubicfoot''': from_to(0.028, 35.3147),
'''cup''': from_to(0.000236588, 4226.75),
}
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ', '.join(a ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ', '.join(a ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
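# Worked example: converting 4 cups to litres goes through cubic metres:
# 4 * 0.000236588 * 1000 ≈ 0.946 litres.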
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[Any]:
__A : Any = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__A : str = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__A : Optional[int] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=a , output_all_encodings=a , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , a ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__A : Union[str, Any] = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__A : Any = os.path.join(get_home_dir() , 'models' )
__A : List[Any] = _load_vocab(a , a , a , cls=a )
__A : Dict = nlp.model.BERTModel(
a , len(a ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=a , use_token_type_embed=a , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=a , use_decoder=a , )
original_bort.load_parameters(a , cast_dtype=a , ignore_extra=a )
__A : Union[str, Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
__A : Any = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(a ),
}
__A : int = BertConfig.from_dict(a )
__A : Union[str, Any] = BertForMaskedLM(a )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(a ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(a , a ):
__A : Tuple = hf_param.shape
__A : str = to_torch(params[gluon_param] )
__A : Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
__A : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__A : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__A : Tuple = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__A : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__A : BertSelfAttention = layer.attention.self
__A : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
__A : Optional[Any] = check_and_map_params(
self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
__A : BertSelfOutput = layer.attention.output
__A : Tuple = check_and_map_params(
self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
__A : int = check_and_map_params(
self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
__A : List[Any] = check_and_map_params(
self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
__A : str = check_and_map_params(
self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
__A : BertIntermediate = layer.intermediate
__A : int = check_and_map_params(
intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
__A : List[Any] = check_and_map_params(
intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
__A : BertOutput = layer.output
__A : List[Any] = check_and_map_params(
bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
__A : Dict = check_and_map_params(
bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
__A : Optional[int] = check_and_map_params(
bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
__A : Dict = check_and_map_params(
bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__A : Any = RobertaTokenizer.from_pretrained('roberta-base' )
__A : List[str] = tokenizer.encode_plus(a )['input_ids']
# Get gluon output
__A : List[str] = mx.nd.array([input_ids] )
__A : Union[str, Any] = original_bort(inputs=a , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(a )
__A : Optional[Any] = BertModel.from_pretrained(a )
hf_bort_model.eval()
__A : Tuple = tokenizer.encode_plus(a , return_tensors='pt' )
__A : Any = hf_bort_model(**a )[0]
__A : Union[str, Any] = output_gluon[0].asnumpy()
__A : Tuple = output_hf[0].detach().numpy()
__A : int = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__A : int = np.allclose(a , a , atol=1e-3 )
if success:
print('✔️ Both models output the same tensors' )
else:
print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:' , a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase : Dict = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 280 | 1 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _SCREAMING_SNAKE_CASE ( a , a ) -> int:
assert isinstance(a , a )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> str:
__A : int = tmp_path / 'cache'
__A : Optional[Any] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__A : List[Any] = TextDatasetReader(a , cache_dir=a , keep_in_memory=a ).read()
_check_text_dataset(a , a )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> int:
__A : Any = tmp_path / 'cache'
__A : List[str] = {'text': 'string'}
__A : List[Any] = features.copy() if features else default_expected_features
__A : List[str] = (
Features({feature: Value(a ) for feature, dtype in features.items()} ) if features is not None else None
)
__A : Optional[int] = TextDatasetReader(a , features=a , cache_dir=a ).read()
_check_text_dataset(a , a )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Tuple:
__A : Tuple = tmp_path / 'cache'
__A : Union[str, Any] = {'text': 'string'}
__A : Optional[int] = TextDatasetReader(a , cache_dir=a , split=a ).read()
_check_text_dataset(a , a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> List[Any]:
if issubclass(a , a ):
__A : Tuple = text_path
elif issubclass(a , a ):
__A : Optional[Any] = [text_path]
__A : List[Any] = tmp_path / 'cache'
__A : Optional[int] = {'text': 'string'}
__A : Tuple = TextDatasetReader(a , cache_dir=a ).read()
_check_text_dataset(a , a )
def _SCREAMING_SNAKE_CASE ( a , a , a=("train",) ) -> Tuple:
assert isinstance(a , a )
for split in splits:
__A : Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> List[str]:
__A : Tuple = tmp_path / 'cache'
__A : List[Any] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__A : Tuple = TextDatasetReader({'train': text_path} , cache_dir=a , keep_in_memory=a ).read()
_check_text_datasetdict(a , a )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> int:
__A : Optional[Any] = tmp_path / 'cache'
# Text files are read as a single "text" column of dtype "string" by default
__A : int = {'text': 'string'}
__A : str = features.copy() if features else default_expected_features
__A : str = (
Features({feature: Value(a ) for feature, dtype in features.items()} ) if features is not None else None
)
__A : int = TextDatasetReader({'train': text_path} , features=a , cache_dir=a ).read()
_check_text_datasetdict(a , a )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Any:
if split:
__A : List[str] = {split: text_path}
else:
__A : List[Any] = 'train'
__A : int = {'train': text_path, 'test': text_path}
__A : Optional[Any] = tmp_path / 'cache'
__A : List[str] = {'text': 'string'}
__A : int = TextDatasetReader(a , cache_dir=a ).read()
_check_text_datasetdict(a , a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 280 |
import colorsys
from PIL import Image # type: ignore
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
__A : List[str] = x
__A : str = y
for step in range(a ): # noqa: B007
__A : Union[str, Any] = a * a - b * b + x
__A : Optional[int] = 2 * a * b + y
__A : List[str] = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
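# Quick sanity checks for the escape-time estimate above:
# get_distance(0, 0, 50) == 1.0 (the origin never diverges)
# get_distance(2, 2, 50) == 0.0 (diverges on the very first step)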
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(a , 1 , 1 ) )
def _SCREAMING_SNAKE_CASE ( a = 8_00 , a = 6_00 , a = -0.6 , a = 0 , a = 3.2 , a = 50 , a = True , ) -> Image.Image:
__A : str = Image.new('RGB' , (image_width, image_height) )
__A : Dict = img.load()
# loop through the image-coordinates
for image_x in range(a ):
for image_y in range(a ):
# determine the figure-coordinates based on the image-coordinates
__A : Dict = figure_width / image_width * image_height # figure height that preserves the image's aspect ratio
__A : Union[str, Any] = figure_center_x + (image_x / image_width - 0.5) * figure_width
__A : Optional[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
__A : Union[str, Any] = get_distance(a , a , a )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__A : Optional[Any] = get_color_coded_rgb(a )
else:
__A : Dict = get_black_and_white_rgb(a )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
UpperCAmelCase : str = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 280 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = '''openai/whisper-base'''
UpperCamelCase : int = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
UpperCamelCase : Optional[int] = '''transcriber'''
UpperCamelCase : Optional[int] = WhisperProcessor
UpperCamelCase : str = WhisperForConditionalGeneration
UpperCamelCase : Optional[Any] = ['''audio''']
UpperCamelCase : Optional[Any] = ['''text''']
def UpperCAmelCase_ ( self , _A ):
return self.pre_processor(_A , return_tensors='pt' ).input_features
def UpperCAmelCase_ ( self , _A ):
return self.model.generate(inputs=_A )
def UpperCAmelCase_ ( self , _A ):
return self.pre_processor.batch_decode(_A , skip_special_tokens=_A )[0]
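# Hypothetical usage sketch (call style assumed from the Transformers tools API):
# tool = _A() # the transcriber tool defined above
# text = tool("path/to/audio.wav") # runs encode -> forward -> decode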
| 280 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def _SCREAMING_SNAKE_CASE ( a , a , a , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _SCREAMING_SNAKE_CASE ( a , a , a , ) -> float:
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return compound_interest(
a , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
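# Worked examples for the formulas above (figures chosen for illustration):
# simple interest: 500 * 0.055 * 10 == 275.0
# compound interest: 10_000 * ((1 + 0.05) ** 3 - 1) == 1576.25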
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase : int = 16
UpperCAmelCase : List[Any] = 32
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[int]:
return int(x / 2**20 )
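# e.g. 3 * 2**20 bytes -> 3, i.e. a bytes-to-MiB conversion (presumably b2mb originally)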
class _A:
"""simple docstring"""
def __enter__( self ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
__A : Union[str, Any] = torch.cuda.memory_allocated()
return self
def __exit__( self , *_A ):
gc.collect()
torch.cuda.empty_cache()
__A : Dict = torch.cuda.memory_allocated()
__A : List[str] = torch.cuda.max_memory_allocated()
__A : str = bamb(self.end - self.begin )
__A : str = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def _SCREAMING_SNAKE_CASE ( a , a = 16 , a = "bert-base-cased" , a = 3_20 , a = 1_60 , ) -> Optional[Any]:
__A : str = AutoTokenizer.from_pretrained(a )
__A : List[Any] = load_dataset(
'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} )
def tokenize_function(a ):
# max_length=None => use the model max length (it's actually the default)
__A : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a , max_length=a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__A : List[Any] = datasets.map(
a , batched=a , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__A : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(a , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__A : Tuple = DataLoader(
tokenized_datasets['train'] , shuffle=a , collate_fn=a , batch_size=a )
__A : List[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=a , collate_fn=a , batch_size=a )
return train_dataloader, eval_dataloader
def _SCREAMING_SNAKE_CASE ( a , a ) -> Union[str, Any]:
# Initialize accelerator
__A : int = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__A : Union[str, Any] = config['lr']
__A : Union[str, Any] = int(config['num_epochs'] )
__A : List[Any] = int(config['seed'] )
__A : Tuple = int(config['batch_size'] )
__A : List[str] = args.model_name_or_path
set_seed(a )
__A , __A : Dict = get_dataloaders(a , a , a , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__A : str = AutoModelForSequenceClassification.from_pretrained(a , return_dict=a )
# Instantiate optimizer
__A : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__A : Tuple = optimizer_cls(params=model.parameters() , lr=a )
if accelerator.state.deepspeed_plugin is not None:
__A : str = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__A : Optional[int] = 1
__A : List[Any] = (len(a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__A : Tuple = get_linear_schedule_with_warmup(
optimizer=a , num_warmup_steps=0 , num_training_steps=a , )
else:
__A : List[str] = DummyScheduler(a , total_num_steps=a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__A , __A , __A , __A , __A : Optional[Any] = accelerator.prepare(
a , a , a , a , a )
# We need to keep track of how many total steps we have iterated over
__A : Dict = 0
# We also need to keep track of the stating epoch so files are named properly
__A : Optional[Any] = 0
# Now we train the model
__A : List[Any] = {}
for epoch in range(a , a ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(a ):
__A : Optional[Any] = model(**a )
__A : Optional[Any] = outputs.loss
__A : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
__A : Dict = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(a , a )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
__A : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=a , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=a , )
parser.add_argument(
'--output_dir' , type=a , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=a , default=a , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=a , default=3_20 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=a , default=1_60 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=a , default=1 , help='Number of train epochs.' , )
__A : List[Any] = parser.parse_args()
__A : str = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(a , a )
if __name__ == "__main__":
main()
| 280 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase : Any = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
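# The _LazyModule machinery above boils down to PEP 562's module-level
# __getattr__: attribute access triggers the real import on first use. A minimal
# sketch of that idea, meant to live in a package's __init__.py; the structure
# dict here is illustrative.
import importlib

_import_structure = {'math': ['sqrt']}

def __getattr__(name):
    for module_name, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)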
from __future__ import annotations
class _A:
"""simple docstring"""
def __init__( self , _A ):
__A : Optional[int] = data
__A : Node | None = None
__A : Node | None = None
def _SCREAMING_SNAKE_CASE ( a ) -> None: # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def _SCREAMING_SNAKE_CASE ( a ) -> bool:
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def _SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
__A : Optional[int] = Node(1 )
__A : Optional[Any] = Node(2 )
__A : str = Node(3 )
__A : Union[str, Any] = Node(4 )
__A : Optional[int] = Node(5 )
__A : List[str] = Node(6 )
__A : List[str] = Node(7 )
__A : Union[str, Any] = Node(8 )
__A : Optional[Any] = Node(9 )
print(is_full_binary_tree(a ) )
print(depth_of_tree(a ) )
print('Tree is: ' )
display(a )
if __name__ == "__main__":
main()
| 280 |
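# The full-binary-tree rule above (every node has zero or two children),
# restated as a self-contained check with a single-child counterexample:
class N:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def is_full(t):
    if t is None:
        return True
    if (t.left is None) != (t.right is None):  # exactly one child -> not full
        return False
    return is_full(t.left) and is_full(t.right)

print(is_full(N(1, N(2), N(3))))  # True
print(is_full(N(1, N(2))))        # False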
def _SCREAMING_SNAKE_CASE ( a ) -> bool:
return str(a ) == str(a )[::-1]
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return int(a ) + int(str(a )[::-1] )
def _SCREAMING_SNAKE_CASE ( a = 1_00_00 ) -> int:
__A : int = []
for num in range(1 , a ):
__A : List[str] = 0
__A : List[Any] = num
while iterations < 50:
__A : str = sum_reverse(a )
iterations += 1
if is_palindrome(a ):
break
else:
lychrel_nums.append(a )
return len(a )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 280 | 1 |
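# A worked trace of the reverse-and-add loop above for 349, which reaches a
# palindrome in three iterations (196, by contrast, is the classic candidate
# that never seems to, which is why it counts toward the Lychrel total):
n = 349
for step in range(1, 4):
    n += int(str(n)[::-1])
    print(step, n)
# 1 1292
# 2 4213
# 3 7337  <- palindrome, so 349 is not a Lychrel number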
from PIL import Image
def _SCREAMING_SNAKE_CASE ( a ) -> Image:
__A , __A : Union[str, Any] = image.size
__A : Dict = 0
__A : Tuple = image.load()
for i in range(a ):
for j in range(a ):
__A : Dict = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(a ):
for i in range(a ):
__A : Any = 2_55 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
| 280 |
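# The same mean-threshold rule on a tiny 2x2 grayscale grid, without PIL, to
# make the cutoff explicit: pixels strictly above the mean become 255, the rest 0.
pixels = [[10, 200], [30, 220]]
mean = sum(sum(row) for row in pixels) // 4  # 460 // 4 == 115
binary = [[255 if p > mean else 0 for p in row] for row in pixels]
print(binary)  # [[0, 255], [0, 255]]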
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _A:
"""simple docstring"""
def __init__( self , _A = None ):
if components is None:
__A : int = []
__A : Tuple = list(_A )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(_A , self.__components ) ) + ")"
def __add__( self , _A ):
__A : Optional[int] = len(self )
if size == len(_A ):
__A : Any = [self.__components[i] + other.component(_A ) for i in range(_A )]
return Vector(_A )
else:
raise Exception('must have the same size' )
def __sub__( self , _A ):
__A : Tuple = len(self )
if size == len(_A ):
__A : Union[str, Any] = [self.__components[i] - other.component(_A ) for i in range(_A )]
return Vector(_A )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , (float, int) ):
__A : str = [c * other for c in self.__components]
return Vector(_A )
elif isinstance(_A , _A ) and len(self ) == len(_A ):
__A : Union[str, Any] = len(self )
__A : Dict = [self.__components[i] * other.component(_A ) for i in range(_A )]
return sum(_A )
else: # error case
raise Exception('invalid operand!' )
def UpperCAmelCase_ ( self ):
return Vector(self.__components )
def UpperCAmelCase_ ( self , _A ):
if isinstance(_A , _A ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCAmelCase_ ( self , _A , _A ):
assert -len(self.__components ) <= pos < len(self.__components )
__A : Optional[int] = value
def UpperCAmelCase_ ( self ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__A : Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(_A ) )
def UpperCAmelCase_ ( self , _A , _A = False ):
__A : Optional[Any] = self * other
__A : Optional[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _SCREAMING_SNAKE_CASE ( a ) -> Vector:
assert isinstance(a , a )
return Vector([0] * dimension )
def _SCREAMING_SNAKE_CASE ( a , a ) -> Vector:
assert isinstance(a , a ) and (isinstance(a , a ))
__A : Optional[Any] = [0] * dimension
__A : Tuple = 1
return Vector(a )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
assert (
isinstance(a , a )
and isinstance(a , a )
and (isinstance(a , (int, float) ))
)
return x * scalar + y
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
random.seed(a )
__A : str = [random.randint(a , a ) for _ in range(a )]
return Vector(a )
class _A:
"""simple docstring"""
def __init__( self , _A , _A , _A ):
__A : Optional[Any] = matrix
__A : Dict = w
__A : Optional[int] = h
def __str__( self ):
__A : Tuple = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Optional[Any] = []
for i in range(self.__height ):
__A : Optional[Any] = [
self.__matrix[i][j] + other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
def __sub__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Tuple = []
for i in range(self.__height ):
__A : str = [
self.__matrix[i][j] - other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , _A ): # matrix-vector
if len(_A ) == self.__width:
__A : List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
__A : List[str] = [
self.__matrix[i][j] * other.component(_A )
for j in range(self.__width )
]
ans.change_component(_A , sum(_A ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_A , (int, float) ): # matrix-scalar
__A : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_A , self.__width , self.__height )
return None
def UpperCAmelCase_ ( self ):
return self.__height
def UpperCAmelCase_ ( self ):
return self.__width
def UpperCAmelCase_ ( self , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
__A : int = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__A : List[str] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_A ) ):
__A : Optional[int] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_A , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_A , _A )
else:
raise Exception('Indices out of bounds' )
def UpperCAmelCase_ ( self ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__A : List[str] = [
self.__matrix[0][y] * self.cofactor(0 , _A ) for y in range(self.__width )
]
return sum(_A )
def _SCREAMING_SNAKE_CASE ( a ) -> Matrix:
__A : list[list[float]] = [[0] * n for _ in range(a )]
return Matrix(a , a , a )
def _SCREAMING_SNAKE_CASE ( a , a , a , a ) -> Matrix:
random.seed(a )
__A : list[list[float]] = [
[random.randint(a , a ) for _ in range(a )] for _ in range(a )
]
return Matrix(a , a , a )
| 280 | 1 |
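# A quick numeric check of the angle formula the Vector class uses,
# cos(theta) = (x . y) / (|x| * |y|), on two plain tuples:
import math

x, y = (3.0, 4.0), (4.0, 3.0)
dot = sum(p * q for p, q in zip(x, y))       # 24.0
norms = math.hypot(*x) * math.hypot(*y)      # 5.0 * 5.0
print(math.degrees(math.acos(dot / norms)))  # ~16.26 degrees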
import random
def _SCREAMING_SNAKE_CASE ( a ) -> bool:
__A : Dict = num - 1
__A : Dict = 0
while s % 2 == 0:
__A : Any = s // 2
t += 1
for _ in range(5 ):
__A : List[Any] = random.randrange(2 , num - 1 )
__A : Dict = pow(a , a , a )
if v != 1:
__A : List[Any] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
__A : int = i + 1
__A : Optional[int] = (v**2) % num
return True
def _SCREAMING_SNAKE_CASE ( a ) -> bool:
if num < 2:
return False
__A : Any = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(a )
def _SCREAMING_SNAKE_CASE ( a = 10_24 ) -> int:
while True:
__A : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(a ):
return num
if __name__ == "__main__":
UpperCAmelCase : List[str] = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 280 |
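# The core of the Rabin-Miller test above is modular exponentiation; a tiny
# sanity check of the underlying Fermat property a**(p-1) % p == 1 for a known
# prime and a few bases:
p = 97
for a in (2, 3, 5, 7):
    assert pow(a, p - 1, p) == 1
print('Fermat base checks pass for p = 97')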
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : List[str] = '''▁'''
UpperCAmelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = BertGenerationTokenizer
UpperCamelCase : str = False
UpperCamelCase : Tuple = True
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Tuple = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : str = '<s>'
__A : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCAmelCase_ ( self ):
__A : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(_A ) , 1002 )
def UpperCAmelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCAmelCase_ ( self ):
__A : str = BertGenerationTokenizer(_A , keep_accents=_A )
__A : Dict = tokenizer.tokenize('This is a test' )
self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
__A : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__A : Dict = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__A : Optional[int] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase_ ( self ):
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'Hello World!'
__A : Optional[Any] = [18536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCAmelCase_ ( self ):
__A : Dict = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
__A : int = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCAmelCase_ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__A : Tuple = list(self.big_tokenizer.get_vocab().keys() )[:10]
__A : List[Any] = ' '.join(_A )
__A : Union[str, Any] = self.big_tokenizer.encode_plus(_A , return_tensors='pt' , return_token_type_ids=_A )
__A : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=_A )
__A : int = BertGenerationConfig()
__A : List[str] = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCAmelCase_ ( self ):
# fmt: off
__A : str = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 280 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def _SCREAMING_SNAKE_CASE ( a , a , a , a = 1_00 , ) -> float:
__A : Optional[int] = x_start
__A : str = fnc(a )
__A : Optional[int] = 0.0
for _ in range(a ):
# Approximates curve as a sequence of linear lines and sums their length
__A : List[Any] = (x_end - x_start) / steps + xa
__A : int = fnc(a )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
__A : Any = xa
__A : Tuple = fxa
return length
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
UpperCAmelCase : Optional[int] = 10
while i <= 10_00_00:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 280 |
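# Sanity check for the arc-length approximation above: for f(x) = x the curve
# from 0 to 1 is a straight segment of length sqrt(2), so a single hypot step
# is already exact.
import math

fnc = lambda x: x
xa, xb = 0.0, 1.0
print(math.hypot(xb - xa, fnc(xb) - fnc(xa)))  # 1.4142..., i.e. sqrt(2)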
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _A:
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *_A , **_A ):
pass
def _SCREAMING_SNAKE_CASE ( a ) -> str:
__A : str = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : Dict = np.array(a )
__A : List[Any] = npimg.shape
return {"hash": hashimage(a ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase : int = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Dict = MaskGenerationPipeline(model=_A , image_processor=_A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase_ ( self , _A , _A ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCAmelCase_ ( self ):
pass
@slow
@require_torch
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
__A : List[str] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
__A : List[Any] = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = 'facebook/sam-vit-huge'
__A : List[str] = pipeline('mask-generation' , model=_A )
__A : Tuple = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__A : List[str] = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
| 280 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[Any]:
__A : int = b.T
__A : List[Any] = np.sum(np.square(a ) , axis=1 )
__A : int = np.sum(np.square(a ) , axis=0 )
__A : Any = np.matmul(a , a )
__A : Optional[Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
def _SCREAMING_SNAKE_CASE ( a , a ) -> Any:
__A : Tuple = x.reshape(-1 , 3 )
__A : Any = squared_euclidean_distance(a , a )
return np.argmin(a , axis=1 )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = ['''pixel_values''']
def __init__( self , _A = None , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = True , **_A , ):
super().__init__(**_A )
__A : Optional[Any] = size if size is not None else {'height': 256, 'width': 256}
__A : Any = get_size_dict(_A )
__A : List[str] = np.array(_A ) if clusters is not None else None
__A : int = do_resize
__A : Union[str, Any] = size
__A : Dict = resample
__A : List[Any] = do_normalize
__A : str = do_color_quantize
def UpperCAmelCase_ ( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
__A : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
_A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A = None , ):
__A : Dict = rescale(image=_A , scale=1 / 1_2_7.5 , data_format=_A )
__A : str = image - 1
return image
def UpperCAmelCase_ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
__A : Any = do_resize if do_resize is not None else self.do_resize
__A : Dict = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(_A )
__A : Optional[Any] = resample if resample is not None else self.resample
__A : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__A : List[str] = clusters if clusters is not None else self.clusters
__A : str = np.array(_A )
__A : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
__A : str = [to_numpy_array(_A ) for image in images]
if do_resize:
__A : int = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_normalize:
__A : List[Any] = [self.normalize(image=_A ) for image in images]
if do_color_quantize:
__A : List[Any] = [to_channel_dimension_format(_A , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__A : List[Any] = np.array(_A )
__A : str = color_quantize(_A , _A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__A : Dict = images.shape[0]
__A : List[Any] = images.reshape(_A , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__A : Union[str, Any] = list(_A )
else:
__A : Any = [to_channel_dimension_format(_A , _A ) for image in images]
__A : List[Any] = {'input_ids': images}
return BatchFeature(data=_A , tensor_type=_A )
| 280 |
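# The color-quantization step above maps each pixel to its nearest cluster by
# squared Euclidean distance; a minimal NumPy sketch of that argmin with two
# illustrative clusters:
import numpy as np

clusters = np.array([[0, 0, 0], [255, 255, 255]])  # black, white
pixels = np.array([[10, 10, 10], [200, 210, 220]])
d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
print(np.argmin(d, axis=1))  # [0 1]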
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : List[Any] = tempfile.mkdtemp()
# fmt: off
__A : List[str] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Union[str, Any] = dict(zip(_A , range(len(_A ) ) ) )
__A : Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : int = {'unk_token': '<unk>'}
__A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : List[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : Optional[int] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.get_tokenizer()
__A : str = self.get_rust_tokenizer()
__A : List[str] = self.get_image_processor()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : Optional[int] = self.get_image_processor(do_normalize=_A )
__A : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = self.prepare_image_inputs()
__A : int = image_processor(_A , return_tensors='np' )
__A : str = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : str = self.get_image_processor()
__A : str = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : str = 'lower newer'
__A : str = processor(text=_A , return_tensors='np' )
__A : List[str] = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : int = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : List[str] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = 'lower newer'
__A : Optional[Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Any = 'google/owlvit-base-patch32'
__A : int = OwlViTProcessor.from_pretrained(_A )
__A : Dict = ['cat', 'nasa badge']
__A : Optional[Any] = processor(text=_A )
__A : Optional[int] = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : Dict = [['cat', 'nasa badge'], ['person']]
__A : Dict = processor(text=_A )
__A : Optional[int] = 16
__A : Any = len(_A )
__A : Union[str, Any] = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Union[str, Any] = ['cat', 'nasa badge']
__A : Tuple = processor(text=_A )
__A : str = 16
__A : int = inputs['input_ids']
__A : List[Any] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : str = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Tuple = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : Optional[int] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 |
import math
def _SCREAMING_SNAKE_CASE ( a ) -> list[int]:
__A : List[str] = []
__A : Any = 2
__A : Union[str, Any] = int(math.sqrt(a ) ) # Size of every segment
__A : Any = [True] * (end + 1)
__A : List[Any] = []
while start <= end:
if temp[start] is True:
in_prime.append(a )
for i in range(start * start , end + 1 , a ):
__A : Optional[int] = False
start += 1
prime += in_prime
__A : Any = end + 1
__A : Any = min(2 * end , a )
while low <= n:
__A : List[Any] = [True] * (high - low + 1)
for each in in_prime:
__A : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(a , high + 1 , a ):
__A : Optional[int] = False
for j in range(len(a ) ):
if temp[j] is True:
prime.append(j + low )
__A : Optional[int] = high + 1
__A : Tuple = min(high + end , a )
return prime
print(sieve(10**6))
| 280 | 1 |
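# A direct (non-segmented) Sieve of Eratosthenes as a cross-check: pi(100) = 25,
# which the segmented version above should reproduce.
def simple_sieve(n):
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for i in range(2, int(n**0.5) + 1):
        if flags[i]:
            flags[i * i :: i] = [False] * len(flags[i * i :: i])
    return [i for i, keep in enumerate(flags) if keep]

print(len(simple_sieve(100)))  # 25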
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def _SCREAMING_SNAKE_CASE ( a , a , a = 10**-10 ) -> float:
__A : Tuple = a
while True:
__A : List[Any] = Decimal(a ) - (
Decimal(eval(a ) ) / Decimal(eval(str(diff(a ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(a ) ) < precision: # noqa: S307
return float(a )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
# Find root of log(x) - 1 = 0 (the root is e, not a square root)
print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 280 |
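# The same Newton iteration written out by hand for f(x) = x**2 - 5 (root
# sqrt(5)), supplying the derivative directly instead of going through
# sympy.diff and eval:
x = 2.0
for _ in range(6):
    x -= (x * x - 5) / (2 * x)
print(x)  # 2.2360679..., i.e. sqrt(5)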
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase : Any = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
import torch
from diffusers import DiffusionPipeline
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A ):
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
def __call__( self ):
__A : Optional[Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
__A : int = 1
__A : Dict = self.unet(_A , _A ).sample
__A : Dict = self.scheduler.step(_A , _A , _A ).prev_sample
__A : int = scheduler_output - scheduler_output + torch.ones_like(_A )
return result
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple:
__A , __A : Optional[Any] = [], []
while len(a ) > 1:
__A , __A : Any = min(a ), max(a )
start.append(a )
end.append(a )
collection.remove(a )
collection.remove(a )
end.reverse()
return start + collection + end
if __name__ == "__main__":
UpperCAmelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase : Dict = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 280 | 1 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[int]:
__A : List[Any] = np.inf
def set_batch_size(a ) -> None:
nonlocal batch_size
if isinstance(a , a ):
__A : List[str] = min(a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(a , a ):
__A : Tuple = min(a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(a , a ) and feature.dtype == "binary":
__A : int = min(a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(a , a )
return None if batch_size is np.inf else batch_size
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A = None , _A = None , _A = None , _A = False , _A = False , _A = None , **_A , ):
super().__init__(
_A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , )
__A : Dict = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths}
__A : List[Any] = _PACKAGED_DATASETS_MODULES['parquet'][1]
__A : Any = Parquet(
cache_dir=_A , data_files=_A , features=_A , hash=_A , **_A , )
def UpperCAmelCase_ ( self ):
# Build iterable dataset
if self.streaming:
__A : str = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__A : Tuple = None
__A : int = None
__A : List[str] = None
__A : int = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , )
__A : List[str] = self.builder.as_dataset(
split=self.split , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class _A:
"""simple docstring"""
def __init__( self , _A , _A , _A = None , **_A , ):
__A : Optional[int] = dataset
__A : int = path_or_buf
__A : Optional[int] = batch_size or get_writer_batch_size(dataset.features )
__A : List[str] = parquet_writer_kwargs
def UpperCAmelCase_ ( self ):
__A : Dict = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
__A : List[str] = self._write(file_obj=_A , batch_size=_A , **self.parquet_writer_kwargs )
else:
__A : List[Any] = self._write(file_obj=self.path_or_buf , batch_size=_A , **self.parquet_writer_kwargs )
return written
def UpperCAmelCase_ ( self , _A , _A , **_A ):
__A : Any = 0
__A : Tuple = parquet_writer_kwargs.pop('path_or_buf' , _A )
__A : Union[str, Any] = self.dataset.features.arrow_schema
__A : Optional[Any] = pq.ParquetWriter(_A , schema=_A , **_A )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , _A ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
__A : Optional[Any] = query_table(
table=self.dataset._data , key=slice(_A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(_A )
written += batch.nbytes
writer.close()
return written
| 280 |
def _SCREAMING_SNAKE_CASE ( a , a = 0 ) -> list:
__A : int = length or len(a )
__A : str = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
__A , __A : Optional[int] = list_data[i + 1], list_data[i]
__A : Union[str, Any] = True
return list_data if not swapped else bubble_sort(a , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 1 |
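# One pass of the inner loop above, traced on [3, 1, 2]; each pass bubbles the
# largest remaining element to the end, after which the function recurses with
# length - 1:
data = [3, 1, 2]
for i in range(len(data) - 1):
    if data[i] > data[i + 1]:
        data[i], data[i + 1] = data[i + 1], data[i]
print(data)  # [1, 2, 3] - already sorted after the first pass for this input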
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
UpperCAmelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a , a , a=None , a=None ) -> Optional[Any]:
# Recurse if needed
if "." in tensor_name:
__A : List[Any] = tensor_name.split('.' )
for split in splits[:-1]:
__A : int = getattr(a , a )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
__A : Tuple = new_module
__A : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
__A : Tuple = tensor_name in module._buffers
__A : Tuple = getattr(a , a )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
__A : Optional[Any] = False
__A : List[str] = False
if is_buffer or not is_bitsandbytes_available():
__A : List[Any] = False
__A : Dict = False
else:
__A : Optional[Any] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
__A : Optional[int] = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
if is_8bit or is_4bit:
__A : Optional[int] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
__A : int = old_value.to(a )
elif isinstance(a , torch.Tensor ):
__A : Dict = value.to('cpu' )
if value.dtype == torch.int8:
__A : List[str] = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_8bit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
__A : Union[str, Any] = torch.tensor(a , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , a ) and fp16_statistics is None:
__A : Any = new_value.T
__A : Union[str, Any] = old_value.__dict__
if is_8bit:
__A : Optional[Any] = bnb.nn.Int8Params(a , requires_grad=a , **a ).to(a )
elif is_4bit:
__A : Tuple = bnb.nn.Params4bit(a , requires_grad=a , **a ).to(a )
__A : Optional[int] = new_value
if fp16_statistics is not None:
setattr(module.weight , 'SCB' , fp16_statistics.to(a ) )
else:
if value is None:
__A : Dict = old_value.to(a )
elif isinstance(a , torch.Tensor ):
__A : Union[str, Any] = value.to(a )
else:
__A : str = torch.tensor(a , device=a )
if is_buffer:
__A : List[Any] = new_value
else:
__A : str = nn.Parameter(a , requires_grad=old_value.requires_grad )
__A : Optional[int] = new_value
def _SCREAMING_SNAKE_CASE ( a , a=None , a=None , a=None , a=False ) -> str:
for name, module in model.named_children():
if current_key_name is None:
__A : Optional[Any] = []
current_key_name.append(a )
if (isinstance(a , nn.Linear ) or isinstance(a , a )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(a ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(a , a ):
__A , __A : str = module.weight.shape
else:
__A : int = module.in_features
__A : List[Any] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
__A : int = bnb.nn.LinearabitLt(
a , a , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
__A : List[Any] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
__A : Any = bnb.nn.Linearabit(
a , a , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
__A : Union[str, Any] = True
# Store the module class in case we need to transpose the weight later
__A : str = type(a )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(a )
if len(list(module.children() ) ) > 0:
__A , __A : str = _replace_with_bnb_linear(
a , a , a , a , has_been_replaced=a , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _SCREAMING_SNAKE_CASE ( a , a=None , a=None , a=None ) -> Any:
__A : Optional[int] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
__A , __A : Optional[Any] = _replace_with_bnb_linear(
a , a , a , a )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _SCREAMING_SNAKE_CASE ( *a , **a ) -> str:
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , a , )
return replace_with_bnb_linear(*a , **a )
def _SCREAMING_SNAKE_CASE ( *a , **a ) -> Tuple:
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , a , )
return set_module_quantized_tensor_to_device(*a , **a )
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
    __A : str = deepcopy(a ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
__A : Tuple = find_tied_parameters(a )
# For compatibility with Accelerate < 0.18
if isinstance(a , a ):
__A : Optional[int] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__A : Dict = sum(a , [] )
__A : Optional[int] = len(a ) > 0
# Check if it is a base model
__A : Dict = not hasattr(a , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__A : Tuple = list(model.named_children() )
__A : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
__A : List[Any] = set(a ) - set(a )
__A : int = list(set(a ) ) + list(a )
# remove ".weight" from the keys
__A : List[Any] = ['.weight', '.bias']
__A : int = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__A : int = name.replace(a , '' )
filtered_module_names.append(a )
return filtered_module_names
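# The traversal pattern used by `_replace_with_bnb_linear` above, restated as
# a torch-only sketch so it runs without bitsandbytes. `StubQuantLinear` is a
# placeholder standing in for bnb.nn.Linear8bitLt / bnb.nn.Linear4bit, not a
# real bitsandbytes class.
import torch.nn as nn

class StubQuantLinear(nn.Linear):
    """Illustrative stand-in for a quantized linear layer."""

def replace_linear_sketch(model, modules_to_not_convert=("lm_head",)):
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Rebuild the leaf from its shape (the real code does this under
            # init_empty_weights) and freeze it to avoid unexpected gradients.
            replacement = StubQuantLinear(module.in_features, module.out_features, module.bias is not None)
            replacement.requires_grad_(False)
            model._modules[name] = replacement
        elif len(list(module.children())) > 0:
            # Containers are recursed into, exactly as in the function above.
            replace_linear_sketch(module, modules_to_not_convert)
    return model

_demo = replace_linear_sketch(nn.Sequential(nn.Linear(4, 4), nn.ReLU()))
assert isinstance(_demo[0], StubQuantLinear) and isinstance(_demo[1], nn.ReLU)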
| 280 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( a ) -> int:
if not nums:
return 0
__A : Optional[int] = nums[0]
__A : str = 0
for num in nums[1:]:
__A , __A : Tuple = (
max_excluding + num,
max(a , a ),
)
return max(a , a )
if __name__ == "__main__":
import doctest
doctest.testmod()
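# A readable restatement of the dynamic program above (names are my own):
# `max_including` is the best sum that uses the current number, `max_excluding`
# the best sum that skips it; including a number forces its predecessor out.
def maximum_non_adjacent_sum(nums) -> int:
    """
    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)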
| 280 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCAmelCase : Union[str, Any] = TypeVar('''KEY''')
UpperCAmelCase : List[Any] = TypeVar('''VAL''')
@dataclass(frozen=snake_case__ , slots=snake_case__ )
class _A( Generic[KEY, VAL] ):
"""simple docstring"""
UpperCamelCase : KEY
UpperCamelCase : VAL
class _A( _Item ):
"""simple docstring"""
def __init__( self ):
super().__init__(_A , _A )
def __bool__( self ):
return False
UpperCAmelCase : List[Any] = _DeletedItem()
class _A( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self , _A = 8 , _A = 0.7_5 ):
__A : List[Any] = initial_block_size
__A : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__A : Tuple = capacity_factor
__A : int = 0
def UpperCAmelCase_ ( self , _A ):
return hash(_A ) % len(self._buckets )
def UpperCAmelCase_ ( self , _A ):
return (ind + 1) % len(self._buckets )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Tuple = self._buckets[ind]
if not stored:
__A : List[Any] = _Item(_A , _A )
self._len += 1
return True
elif stored.key == key:
__A : Dict = _Item(_A , _A )
return True
else:
return False
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_A )
def UpperCAmelCase_ ( self ):
if len(self._buckets ) <= self._initial_block_size:
return False
__A : str = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase_ ( self , _A ):
__A : Any = self._buckets
__A : str = [None] * new_size
__A : Dict = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase_ ( self ):
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase_ ( self ):
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase_ ( self , _A ):
__A : List[str] = self._get_bucket_index(_A )
for _ in range(len(self._buckets ) ):
yield ind
__A : int = self._get_next_ind(_A )
def UpperCAmelCase_ ( self , _A , _A ):
for ind in self._iterate_buckets(_A ):
if self._try_set(_A , _A , _A ):
break
def __setitem__( self , _A , _A ):
if self._is_full():
self._size_up()
self._add_item(_A , _A )
def __delitem__( self , _A ):
for ind in self._iterate_buckets(_A ):
__A : Union[str, Any] = self._buckets[ind]
if item is None:
raise KeyError(_A )
if item is _deleted:
continue
if item.key == key:
__A : Optional[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , _A ):
for ind in self._iterate_buckets(_A ):
__A : Optional[int] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_A )
def __len__( self ):
return self._len
def __iter__( self ):
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
__A : str = ' ,'.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})"""
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : Optional[int] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
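# The idea behind `_LazyModule` above, in miniature: a module object whose
# __getattr__ performs the real import only on first attribute access. This
# is a simplified sketch; the actual class also handles module specs and the
# OptionalDependencyNotAvailable fallbacks used throughout this file.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that actually defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs only once per name
        return value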
| 280 | 1 |
import re
import string
import numpy as np
import datasets
UpperCAmelCase : int = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
UpperCAmelCase : List[Any] = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
UpperCAmelCase : int = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def UpperCAmelCase_ ( self , _A , _A , _A=None , _A=False , _A=False , _A=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__A : List[str] = np.array([re.sub(_A , '' , _A ) for x in predictions] )
__A : Union[str, Any] = np.array([re.sub(_A , '' , _A ) for x in references] )
else:
__A : int = np.asarray(_A )
__A : str = np.asarray(_A )
if ignore_case:
__A : Dict = np.char.lower(_A )
__A : Optional[int] = np.char.lower(_A )
if ignore_punctuation:
__A : int = string.punctuation.maketrans('' , '' , string.punctuation )
__A : Optional[Any] = np.char.translate(_A , table=_A )
__A : Union[str, Any] = np.char.translate(_A , table=_A )
if ignore_numbers:
__A : Optional[Any] = string.digits.maketrans('' , '' , string.digits )
__A : Optional[int] = np.char.translate(_A , table=_A )
__A : Optional[Any] = np.char.translate(_A , table=_A )
__A : Any = predictions == references
return {"exact_match": np.mean(_A ) * 100}
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> str:
if number > 0:
raise ValueError('input must be a negative integer' )
__A : Optional[int] = len(bin(a )[3:] )
__A : Dict = bin(abs(a ) - (1 << binary_number_length) )[3:]
__A : int = (
(
'1'
+ '0' * (binary_number_length - len(a ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
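# A cleaned-up version of the routine above, with a worked example. The name
# `twos_complement` is an assumption; the sample anonymizes it.
def twos_complement(number: int) -> str:
    """Two's complement of a non-positive integer as a binary string.

    Worked example for -5: bin(-5)[3:] == '101' (3 bits), abs(-5) - 2**3 == -3,
    bin(-3)[3:] == '11', padded with a leading '1' and zeros to '1011'.
    >>> twos_complement(-5)
    '0b1011'
    >>> twos_complement(0)
    '0b0'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    padding = "0" * (binary_number_length - len(twos_complement_number))
    return "0b" + ("1" + padding + twos_complement_number if number < 0 else "0")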
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase : Optional[int] = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> None:
__A : int = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(a ) == len(a ), F"""{len(a )} != {len(a )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
UpperCAmelCase : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
UpperCAmelCase : Optional[int] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _SCREAMING_SNAKE_CASE ( a , a ) -> Dict:
try:
__A : int = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(a ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
    __A : List[str] = 'encoder_layers and decoder_layers cannot both be None; you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a , a ):
AutoTokenizer.from_pretrained(a ).save_pretrained(a ) # purely for convenience
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a ).eval()
else:
assert isinstance(a , a ), F"""teacher must be a model or string got type {type(a )}"""
__A : int = teacher.config.to_diff_dict()
try:
__A , __A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__A : str = teacher_e
if d is None:
__A : List[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__A , __A : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__A , __A : Optional[int] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__A : int = teacher_e
if d is None:
__A : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a )
# Copy weights
__A : Dict = teacher.config_class(**a )
__A : int = AutoModelForSeqaSeqLM.from_config(a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__A : Any = student.load_state_dict(teacher.state_dict() , strict=a )
    assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__A , __A : Optional[int] = list(range(a ) ), list(range(a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
if d_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
try:
if hasattr(
a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a )
copy_layers(teacher.decoder.block , student.decoder.block , a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__A : Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
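# The layer-selection rule above, restated compactly. `pick_layers_to_copy`
# is the name the rest of this script calls; supported (teacher, student)
# pairs come from LAYERS_TO_COPY, anything else falls back to the first
# n_student layers with a warning.
def pick_layers_to_copy(n_student, n_teacher):
    try:
        return LAYERS_TO_COPY[n_teacher][n_student]
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, "
                f"defaulting to first {n_student}"
            )
        return list(range(n_student))

assert pick_layers_to_copy(4, 12) == [0, 4, 8, 11]    # hardcoded mapping
assert pick_layers_to_copy(5, 12) == [0, 1, 2, 3, 4]  # fallback path (warns)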
| 280 | 1 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCAmelCase : Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , _A=None , _A=None , _A=None , **_A ):
super().__init__(*_A , **_A )
__A : Optional[Any] = eval_examples
__A : Optional[Any] = post_process_function
__A : int = quant_trainer_args
__A : Dict = 128 # default number of calibration samples
def UpperCAmelCase_ ( self , _A=None ):
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires a calib_dataset.' )
__A : List[str] = calib_dataset if calib_dataset is not None else self.calib_dataset
__A : Dict = self._remove_unused_columns(_A , description='Calibration' )
return DataLoader(
_A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_A , )
def UpperCAmelCase_ ( self , _A=None ):
__A : Any = self.train_dataset if calib_dataset is None else calib_dataset
__A : List[Any] = self.get_calib_dataloader(_A )
__A : Tuple = self.model
quant_trainer.configure_model(_A , self.quant_trainer_args , calib=_A )
model.eval()
quant_trainer.enable_calibration(_A )
logger.info('***** Running calibration *****' )
logger.info(F""" Num examples = {self.calib_num}""" )
logger.info(F""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(_A ):
# Prediction step
__A , __A , __A : Dict = self.prediction_step(_A , _A , prediction_loss_only=_A )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(_A , self.quant_trainer_args )
__A : Tuple = model
def UpperCAmelCase_ ( self , _A=None , _A=None , _A=None , _A = "eval" ):
__A : int = self.eval_dataset if eval_dataset is None else eval_dataset
__A : Optional[Any] = self.get_eval_dataloader(_A )
__A : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__A : Dict = self.compute_metrics
__A : int = None
__A : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__A : Tuple = eval_loop(
_A , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
finally:
__A : Optional[int] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__A : Dict = self.post_process_function(_A , _A , output.predictions )
__A : Optional[Any] = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__A : List[str] = metrics.pop(_A )
self.log(_A )
else:
__A : str = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__A : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A )
return metrics
def UpperCAmelCase_ ( self , _A , _A , _A=None , _A = "test" ):
__A : Optional[int] = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
__A : int = self.compute_metrics
__A : Tuple = None
__A : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__A : Union[str, Any] = eval_loop(
_A , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
finally:
__A : Dict = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__A : Any = self.post_process_function(_A , _A , output.predictions , 'predict' )
__A : str = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__A : Tuple = metrics.pop(_A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
def UpperCAmelCase_ ( self , _A="./" ):
__A : Tuple = self.eval_dataset
__A : List[str] = self.get_eval_dataloader(_A )
__A : Union[str, Any] = next(iter(_A ) )
# saving device - to make it consistent
__A : List[str] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
# convert to tuple
__A : Any = tuple(v.to(_A ) for k, v in batch.items() )
logger.info('Converting model to be onnx compatible' )
from pytorch_quantization.nn import TensorQuantizer
__A : str = True
__A : str = self.model.to(_A )
model.eval()
model.float()
__A : str = model.module if hasattr(_A , 'module' ) else model
quant_trainer.configure_model(_A , self.quant_trainer_args )
__A : List[Any] = os.path.join(_A , 'model.onnx' )
logger.info(F"""exporting model to {output_model_file}""" )
__A : str = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
_A , _A , _A , export_params=_A , opset_version=13 , do_constant_folding=_A , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=_A , )
logger.info('onnx export finished' )
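# The calibration pattern used by the trainer above, reduced to its torch-only
# core: run a bounded number of batches through the model in eval mode so that
# attached observers can record activation ranges. The dict-style batch with an
# "input_ids" key is an assumption for illustration; quant_trainer specifics
# are omitted.
@torch.no_grad()
def calibrate_sketch(model, dataloader, num_samples=128):
    model.eval()
    seen = 0
    for batch in dataloader:
        model(**batch)  # observers hooked into the model see these activations
        seen += batch["input_ids"].shape[0]
        if seen >= num_samples:
            break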
| 280 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> list[int]:
__A : Optional[int] = int(a )
# Initialize Result
__A : Optional[int] = []
# Traverse through all denomination
for denomination in reversed(a ):
# Find denominations
while int(a ) >= int(a ):
total_value -= int(a )
            answer.append(a ) # Append to the answer list
return answer
# Driver Code
if __name__ == "__main__":
UpperCAmelCase : List[str] = []
UpperCAmelCase : Optional[int] = '''0'''
if (
    input('''Do you want to enter your denominations? (y/n): ''').strip().lower()
== "y"
):
UpperCAmelCase : List[Any] = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
UpperCAmelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase : Optional[int] = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCAmelCase : Tuple = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
UpperCAmelCase : Optional[int] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
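# The greedy rule above, stated compactly (names are my own): repeatedly take
# the largest denomination that still fits. This is optimal for canonical coin
# systems such as the INR set used here, but not for arbitrary denominations.
def find_minimum_change(denominations, value):
    """
    >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
    [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
    """
    total_value = int(value)
    answer = []
    for denomination in sorted(denominations, reverse=True):
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer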
| 280 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Tuple = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
UpperCAmelCase : List[str] = {'''allegro/herbert-base-cased''': 5_14}
UpperCAmelCase : str = {}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = VOCAB_FILES_NAMES
UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Union[str, Any] = HerbertTokenizer
def __init__( self , _A=None , _A=None , _A=None , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A="</s>" , **_A , ):
super().__init__(
_A , _A , tokenizer_file=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , sep_token=_A , **_A , )
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Tuple = [self.cls_token_id]
__A : Dict = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase_ ( self , _A , _A = None , _A = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Any = [self.sep_token_id]
__A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Optional[Any] = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
| 280 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : List[Any] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__A : Union[str, Any] = parent
__A : Optional[int] = batch_size
__A : int = num_channels
__A : int = min_resolution
__A : Any = max_resolution
__A : List[Any] = do_resize
__A : List[Any] = size
__A : Union[str, Any] = do_normalize
__A : Optional[int] = image_mean
__A : Optional[int] = image_std
__A : int = do_rescale
__A : str = rescale_factor
__A : Tuple = do_pad
def UpperCAmelCase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self , _A , _A=False ):
if not batched:
__A : List[str] = image_inputs[0]
if isinstance(_A , Image.Image ):
__A , __A : int = image.size
else:
__A , __A : Any = image.shape[1], image.shape[2]
if w < h:
__A : List[Any] = int(self.size['shortest_edge'] * h / w )
__A : List[Any] = self.size['shortest_edge']
elif w > h:
__A : Union[str, Any] = self.size['shortest_edge']
__A : str = int(self.size['shortest_edge'] * w / h )
else:
__A : Dict = self.size['shortest_edge']
__A : str = self.size['shortest_edge']
else:
__A : int = []
for image in image_inputs:
__A , __A : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : List[str] = max(_A , key=lambda _A : item[0] )[0]
__A : str = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : Dict = YolosImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _A )
__A : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _A )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
__A : str = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[int] = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processings
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
__A : Any = self.image_processing_class(do_resize=_A , do_normalize=_A , do_rescale=_A )
# create random PyTorch tensors
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__A : Optional[int] = image_processing_a.pad(_A , return_tensors='pt' )
__A : Optional[int] = image_processing_a(_A , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image and target
__A : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__A : Optional[Any] = json.loads(f.read() )
__A : Optional[Any] = {'image_id': 39769, 'annotations': target}
# encode them
__A : str = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
__A : List[Any] = image_processing(images=_A , annotations=_A , return_tensors='pt' )
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify orig_size
__A : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image, target and masks_path
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__A : Tuple = json.loads(f.read() )
__A : Any = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
__A : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__A : Any = YolosImageProcessor(format='coco_panoptic' )
__A : List[Any] = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='pt' )
# verify pixel values
__A : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify masks
__A : Tuple = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _A )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
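# The resize rule exercised by `get_expected_values` above, isolated: scale so
# that the shorter side lands on size["shortest_edge"], preserving the aspect
# ratio (a sketch; the helper name is my own).
def shortest_edge_resize(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(40, 20) == (36, 18)  # portrait: width pinned to 18
assert shortest_edge_resize(20, 40) == (18, 36)  # landscape: height pinned to 18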
| 280 | 1 |
import os
import pytest
from attr import dataclass
UpperCAmelCase : List[Any] = '''us-east-1''' # defaults region
@dataclass
class _A:
"""simple docstring"""
UpperCamelCase : str
UpperCamelCase : Tuple = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
UpperCamelCase : str = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 16,
'''per_device_eval_batch_size''': 16,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 500,
'''save_steps''': 5_500,
}
UpperCamelCase : Optional[int] = {**hyperparameters, '''max_steps''': 1_000}
@property
def UpperCAmelCase_ ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def UpperCAmelCase_ ( self ):
return F"""{self.framework}-transfromers-test"""
@property
def UpperCAmelCase_ ( self ):
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def UpperCAmelCase_ ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : Optional[int] = SageMakerTestEnvironment(framework=request.cls.framework )
| 280 |
import argparse
import json
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=a , help='where to store parsed gold_data_path file' , )
__A : Optional[int] = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
__A : List[Any] = json.load(a )
for dpr_record in tqdm(a ):
__A : Dict = dpr_record['question']
__A : Any = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(a ) + '\n' )
if __name__ == "__main__":
main()
| 280 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def _SCREAMING_SNAKE_CASE ( a , a , a = False ) -> list[float]:
if radian_mode:
return [magnitude * cos(a ), magnitude * sin(a )]
return [magnitude * cos(radians(a ) ), magnitude * sin(radians(a ) )]
def _SCREAMING_SNAKE_CASE ( a , a , a = 10**-1 ) -> bool:
__A : NDArray[floataa] = cross(a , a )
__A : float = sum(a )
return abs(a ) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCAmelCase : Tuple = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
UpperCAmelCase : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCAmelCase : Dict = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
UpperCAmelCase : List[Any] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCAmelCase : Dict = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
UpperCAmelCase : int = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
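# A compact restatement of the equilibrium test above: the net torque about
# the origin is the sum of the 2D cross products r x F, and the system is in
# (rotational) equilibrium when that sum is ~0. Self-contained demo using the
# `array` and `cross` imports from the top of this file:
def net_moment(forces, location):
    return float(sum(cross(location, forces)))

demo_forces = array([[0.0, 10.0], [0.0, -10.0]])  # equal and opposite forces
demo_location = array([[1.0, 0.0], [1.0, 0.0]])   # applied at the same point
assert abs(net_moment(demo_forces, demo_location)) < 1e-9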
| 280 |
from heapq import heappop, heappush
import numpy as np
def _SCREAMING_SNAKE_CASE ( a , a , a , a , ) -> tuple[float | int, list[tuple[int, int]]]:
__A , __A : int = grid.shape
__A : Any = [-1, 1, 0, 0]
__A : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__A , __A : Optional[int] = [(0, source)], set()
__A : Any = np.full((rows, cols) , np.inf )
__A : Any = 0
__A : Any = np.empty((rows, cols) , dtype=a )
__A : Optional[Any] = None
while queue:
((__A) , (__A)) : List[str] = heappop(a )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__A : int = []
while (x, y) != source:
path.append((x, y) )
__A , __A : Optional[int] = predecessors[x, y]
path.append(a ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(a ) ):
__A , __A : Union[str, Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__A : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(a , (dist + 1, (nx, ny)) )
__A : List[Any] = dist + 1
__A : Union[str, Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
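# A minimal runnable restatement of the grid search above (all edges cost 1,
# so Dijkstra degenerates to BFS here); names are my own.
def grid_shortest_path(grid, source, destination):
    rows, cols = len(grid), len(grid[0])
    dist = {source: 0}
    queue = [(0, source)]
    while queue:
        d, (x, y) = heappop(queue)
        if (x, y) == destination:
            return d
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            # Stay on the grid and only step onto walkable (== 1) cells.
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1:
                if d + 1 < dist.get((nx, ny), float("inf")):
                    dist[(nx, ny)] = d + 1
                    heappush(queue, (d + 1, (nx, ny)))
    return float("inf")

# Right along the top row, then down the right column: 4 unit steps.
assert grid_shortest_path([[1, 1, 1], [0, 0, 1], [1, 1, 1]], (0, 0), (2, 2)) == 4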
| 280 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class _A( snake_case__ , snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = 1
@register_to_config
def __init__( self , _A = 1000 , _A = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(_A )
# standard deviation of the initial noise distribution
__A : Optional[Any] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__A : List[Any] = 4
# running values
__A : Union[str, Any] = []
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Union[str, Any] = num_inference_steps
__A : str = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__A : Optional[Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__A : Any = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__A : Any = torch.sin(steps * math.pi / 2 ) ** 2
__A : Union[str, Any] = (1.0 - self.betas**2) ** 0.5
__A : Union[str, Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__A : Tuple = timesteps.to(_A )
__A : Optional[Any] = []
def UpperCAmelCase_ ( self , _A , _A , _A , _A = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
__A : List[Any] = (self.timesteps == timestep).nonzero().item()
__A : Tuple = timestep_index + 1
__A : Union[str, Any] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(_A )
if len(self.ets ) == 1:
__A : Optional[Any] = self.ets[-1]
elif len(self.ets ) == 2:
__A : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__A : Any = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__A : Any = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__A : Tuple = self._get_prev_sample(_A , _A , _A , _A )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_A )
def UpperCAmelCase_ ( self , _A , *_A , **_A ):
return sample
def UpperCAmelCase_ ( self , _A , _A , _A , _A ):
__A : Optional[int] = self.alphas[timestep_index]
__A : Dict = self.betas[timestep_index]
__A : List[Any] = self.alphas[prev_timestep_index]
__A : str = self.betas[prev_timestep_index]
__A : List[Any] = (sample - sigma * ets) / max(_A , 1e-8 )
__A : str = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ):
return self.config.num_train_timesteps
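# Note on the multistep weights in `step` above: they are the classical
# Adams-Bashforth coefficients applied to the stored residuals `ets`, i.e.
#   order 2: (3 f_k - f_{k-1}) / 2
#   order 3: (23 f_k - 16 f_{k-1} + 5 f_{k-2}) / 12
#   order 4: (55 f_k - 59 f_{k-1} + 37 f_{k-2} - 9 f_{k-3}) / 24
# Each weight set sums to its denominator, so a constant derivative is
# integrated exactly; a quick consistency check:
for _weights, _denom in (([3, -1], 2), ([23, -16, 5], 12), ([55, -59, 37, -9], 24)):
    assert sum(_weights) == _denom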
| 280 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def _SCREAMING_SNAKE_CASE ( a , a , a=8 ) -> Tuple:
__A : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__A : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _SCREAMING_SNAKE_CASE ( a , a=5_12 , a=5_12 ) -> int:
__A : Optional[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
__A : Union[str, Any] = np.array(pil_image.convert('RGB' ) )
__A : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
__A : int = np.transpose(a , [2, 0, 1] )
__A : Tuple = torch.from_numpy(a ).unsqueeze(0 )
return image
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A , _A , ):
super().__init__()
self.register_modules(
unet=_A , scheduler=_A , movq=_A , )
__A : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self , _A , _A , _A ):
# get the original timestep using init_timestep
__A : Optional[int] = min(int(num_inference_steps * strength ) , _A )
__A : Dict = max(num_inference_steps - init_timestep , 0 )
__A : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A=None ):
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}""" )
__A : Union[str, Any] = image.to(device=_A , dtype=_A )
__A : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__A : int = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(_A , _A ):
__A : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
__A : str = torch.cat(_A , dim=0 )
else:
__A : List[str] = self.movq.encode(_A ).latent_dist.sample(_A )
__A : Tuple = self.movq.config.scaling_factor * init_latents
__A : Optional[int] = torch.cat([init_latents] , dim=0 )
__A : Union[str, Any] = init_latents.shape
__A : List[str] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
__A : Optional[Any] = self.scheduler.add_noise(_A , _A , _A )
__A : Optional[int] = init_latents
return latents
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__A : Optional[int] = torch.device(F"""cuda:{gpu_id}""" )
__A : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__A : List[Any] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__A : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
__A , __A : Optional[int] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
__A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
def __call__( self , _A , _A , _A , _A = 512 , _A = 512 , _A = 100 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ):
__A : List[Any] = self._execution_device
__A : Optional[Any] = guidance_scale > 1.0
if isinstance(_A , _A ):
__A : Optional[Any] = torch.cat(_A , dim=0 )
__A : Tuple = image_embeds.shape[0]
        if isinstance(_A , list ):
__A : List[Any] = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
__A : Union[str, Any] = image_embeds.repeat_interleave(_A , dim=0 )
__A : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
__A : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
        if not isinstance(_A , list ):
            __A : List[Any] = [image]
        if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F"""Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL images and PyTorch tensors""" )
__A : Dict = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
__A : Any = image.to(dtype=image_embeds.dtype , device=_A )
__A : Tuple = self.movq.encode(_A )['latents']
__A : int = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
__A , __A : int = self.get_timesteps(_A , _A , _A )
__A : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
__A , __A : Any = downscale_height_and_width(_A , _A , self.movq_scale_factor )
__A : Tuple = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__A : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A : Dict = {'image_embeds': image_embeds}
__A : List[str] = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
__A , __A : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__A , __A : Optional[Any] = noise_pred.chunk(2 )
__A , __A : List[str] = variance_pred.chunk(2 )
__A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__A : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__A , __A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__A : List[str] = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
__A : List[Any] = self.movq.decode(_A , force_not_quantize=_A )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__A : List[str] = image * 0.5 + 0.5
__A : List[str] = image.clamp(0 , 1 )
__A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__A : Any = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
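# Side note: a minimal, self-contained sketch of the strength-based schedule
# truncation performed by the get_timesteps logic above (pure Python, no
# scheduler object; the names are illustrative, not the pipeline's API):
def truncated_schedule(timesteps, num_inference_steps, strength):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start

_full = list(range(1000, 0, -10))  # a toy 100-step schedule, most noisy first
_tail, _n = truncated_schedule(_full, num_inference_steps=100, strength=0.3)
assert _n == 30 and len(_tail) == 30  # strength 0.3 denoises only the last 30 steps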
| 280 | 1 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[int]:
__A : Dict = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
F"""{test_file} instead.""" )
__A : Optional[int] = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
__A : Optional[Any] = components[:-1] + [test_fn.replace('.py' , '' )]
__A : str = '.'.join(a )
return test_module_path
def _SCREAMING_SNAKE_CASE ( a ) -> List[Any]:
__A : Dict = get_module_path(a )
__A : Dict = importlib.import_module(a )
return test_module
def _SCREAMING_SNAKE_CASE ( a ) -> List[str]:
__A : Union[str, Any] = []
__A : Tuple = get_test_module(a )
for attr in dir(a ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(a , a ) )
# sort with class names
    return sorted(a , key=lambda a : a.__name__ )
def _SCREAMING_SNAKE_CASE ( a ) -> int:
__A : Optional[Any] = []
__A : Union[str, Any] = get_test_module(a )
for attr in dir(a ):
__A : List[Any] = getattr(a , a )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
__A : Dict = getattr(a , 'all_model_classes' , [] )
if len(a ) > 0:
test_classes.append(a )
# sort with class names
    return sorted(a , key=lambda a : a.__name__ )
def _SCREAMING_SNAKE_CASE ( a ) -> Union[str, Any]:
__A : Tuple = get_test_classes(a )
__A : List[Any] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(a , key=lambda a : a.__name__ )
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : str = test_class()
if hasattr(a , 'setUp' ):
test.setUp()
__A : List[Any] = None
if hasattr(a , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
__A : int = test.model_tester.__class__
return model_tester
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[int]:
__A : List[Any] = get_test_classes(a )
__A : int = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(a )
# sort with class names
    return sorted(a , key=lambda a : a.__name__ )
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : Union[str, Any] = get_test_classes_for_model(a , a )
__A : List[str] = []
for test_class in test_classes:
__A : Optional[Any] = get_model_tester_from_test_class(a )
if tester_class is not None:
tester_classes.append(a )
# sort with class names
    return sorted(a , key=lambda a : a.__name__ )
def _SCREAMING_SNAKE_CASE ( a ) -> int:
__A : List[str] = get_test_classes(a )
__A : Optional[Any] = {test_class: get_model_tester_from_test_class(a ) for test_class in test_classes}
return test_tester_mapping
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[Any]:
__A : Optional[Any] = get_model_classes(a )
__A : List[Any] = {
model_class: get_test_classes_for_model(a , a ) for model_class in model_classes
}
return model_test_mapping
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[Any]:
__A : int = get_model_classes(a )
__A : List[str] = {
model_class: get_tester_classes_for_model(a , a ) for model_class in model_classes
}
return model_to_tester_mapping
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[int]:
    if isinstance(a , str ):
        return a
    elif isinstance(a , type ):
        return a.__name__
    elif isinstance(a , (list, tuple) ):
        return [to_json(x ) for x in a]
    elif isinstance(a , dict ):
        return {to_json(k ): to_json(v ) for k, v in a.items()}
    else:
        return a
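# Side note: the helpers above all hinge on the same dir()/getattr() discovery
# idiom. A self-contained sketch with a fabricated module (all names invented):
import types

_mod = types.ModuleType('fake_test_module')
class _DemoModelTester: ...
class _DemoModelTest:
    all_model_classes = (object,)
_mod.DemoModelTester = _DemoModelTester
_mod.DemoModelTest = _DemoModelTest

_testers = [getattr(_mod, attr) for attr in dir(_mod) if attr.endswith('ModelTester')]
_tests = [
    getattr(_mod, attr)
    for attr in dir(_mod)
    if len(getattr(getattr(_mod, attr), 'all_model_classes', [])) > 0
]
assert _testers == [_DemoModelTester] and _tests == [_DemoModelTest]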
| 280 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[Any]:
    __A : Any = {
        'attention_cell': 'multi_head',
        'num_layers': 4,
        'units': 1024,
        'hidden_size': 768,
        'max_length': 512,
        'num_heads': 8,
        'scaled': True,
        'dropout': 0.1,
        'use_residual': True,
        'embed_size': 1024,
        'embed_dropout': 0.1,
        'word_embed': None,
        'layer_norm_eps': 1e-5,
        'token_type_vocab_size': 2,
    }
__A : str = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__A : Optional[int] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=a , output_all_encodings=a , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , a ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__A : Union[str, Any] = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__A : Any = os.path.join(get_home_dir() , 'models' )
__A : List[Any] = _load_vocab(a , a , a , cls=a )
__A : Dict = nlp.model.BERTModel(
a , len(a ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=a , use_token_type_embed=a , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=a , use_decoder=a , )
original_bort.load_parameters(a , cast_dtype=a , ignore_extra=a )
__A : Union[str, Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
__A : Any = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(a ),
}
__A : int = BertConfig.from_dict(a )
__A : Union[str, Any] = BertForMaskedLM(a )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(a ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(a , a ):
__A : Tuple = hf_param.shape
__A : str = to_torch(params[gluon_param] )
__A : Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
__A : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__A : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__A : Tuple = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__A : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__A : BertSelfAttention = layer.attention.self
__A : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
__A : Optional[Any] = check_and_map_params(
self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
__A : BertSelfOutput = layer.attention.output
__A : Tuple = check_and_map_params(
self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
__A : int = check_and_map_params(
self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
__A : List[Any] = check_and_map_params(
self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
__A : str = check_and_map_params(
self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
__A : BertIntermediate = layer.intermediate
__A : int = check_and_map_params(
intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
__A : List[Any] = check_and_map_params(
intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
__A : BertOutput = layer.output
__A : List[Any] = check_and_map_params(
bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
__A : Dict = check_and_map_params(
bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
__A : Optional[int] = check_and_map_params(
bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
__A : Dict = check_and_map_params(
bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__A : Any = RobertaTokenizer.from_pretrained('roberta-base' )
__A : List[str] = tokenizer.encode_plus(a )['input_ids']
# Get gluon output
__A : List[str] = mx.nd.array([input_ids] )
__A : Union[str, Any] = original_bort(inputs=a , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(a )
__A : Optional[Any] = BertModel.from_pretrained(a )
hf_bort_model.eval()
__A : Tuple = tokenizer.encode_plus(a , return_tensors='pt' )
__A : Any = hf_bort_model(**a )[0]
__A : Union[str, Any] = output_gluon[0].asnumpy()
__A : Tuple = output_hf[0].detach().numpy()
__A : int = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__A : int = np.allclose(a , a , atol=1e-3 )
if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:' , a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase : Dict = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
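# Side note: the heart of check_and_map_params is a shape-guarded copy. A tiny
# stand-alone version, with NumPy standing in for the MXNet arrays (the names
# are illustrative, not the script's API):
import numpy as np
import torch
from torch import nn

def copy_checked(hf_param: nn.Parameter, source: np.ndarray) -> nn.Parameter:
    # refuse mis-sized (e.g. silently transposed) weights before copying
    assert tuple(hf_param.shape) == source.shape, (tuple(hf_param.shape), source.shape)
    return nn.Parameter(torch.from_numpy(source).float())

_lin = nn.Linear(4, 3)
_lin.weight = copy_checked(_lin.weight, np.ones((3, 4), dtype=np.float32))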
| 280 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
UpperCAmelCase : List[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
UpperCAmelCase : Dict = 12_80_22
UpperCAmelCase : Any = 12_80_28
@require_sentencepiece
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = MaMaaaTokenizer
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
UpperCamelCase : Any = True
def UpperCAmelCase_ ( self ):
super().setUp()
__A : int = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
__A : Tuple = dict(zip(_A , range(len(_A ) ) ) )
__A : List[Any] = Path(self.tmpdirname )
save_json(_A , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_A , save_dir / VOCAB_FILES_NAMES['spm_file'] )
__A : Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , **_A ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self , _A ):
return (
"This is a test",
"This is a test",
)
def UpperCAmelCase_ ( self ):
__A : Tuple = '</s>'
__A : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.get_tokenizer()
__A : Dict = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<s>' )
self.assertEqual(len(_A ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_tokenizer()
__A : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [2, 3, 4, 5, 6] , )
__A : Optional[int] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
__A : str = tokenizer.convert_tokens_to_string(_A )
self.assertEqual(_A , 'This is a test' )
@slow
def UpperCAmelCase_ ( self ):
# fmt: off
__A : List[Any] = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[Any] = '''facebook/m2m100_418M'''
UpperCamelCase : Any = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
UpperCamelCase : Optional[Any] = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
UpperCamelCase : List[str] = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
@classmethod
def UpperCAmelCase_ ( cls ):
__A : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
__A : Optional[Any] = 1
return cls
def UpperCAmelCase_ ( self ):
self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 128063 )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.tokenizer.get_vocab()
self.assertEqual(len(_A ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['<unk>'] , 3 )
self.assertIn(self.tokenizer.get_lang_token('en' ) , _A )
def UpperCAmelCase_ ( self ):
__A : Any = 'en'
__A : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _A )
def UpperCAmelCase_ ( self ):
self.assertIn(_A , self.tokenizer.all_special_ids )
# fmt: off
__A : str = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
__A : str = self.tokenizer.decode(_A , skip_special_tokens=_A )
__A : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
self.assertNotIn(self.tokenizer.eos_token , _A )
def UpperCAmelCase_ ( self ):
__A : int = tempfile.mkdtemp()
__A : int = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_A )
__A : Union[str, Any] = MaMaaaTokenizer.from_pretrained(_A )
self.assertDictEqual(new_tok.lang_token_to_id , _A )
@require_torch
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'en'
__A : Tuple = 'fr'
__A : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors='pt' )
__A : Union[str, Any] = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
__A : str = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def UpperCAmelCase_ ( self ):
__A : Tuple = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
__A : List[Any] = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def UpperCAmelCase_ ( self ):
__A : int = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
__A : Optional[Any] = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def UpperCAmelCase_ ( self ):
__A : Any = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
self.assertEqual(
nested_simplify(_A ) , {
# en_XX, A, test, EOS
'input_ids': [[128022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 128006,
} , )
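# Side note: a simplified sketch of what shift_tokens_right does to build
# decoder inputs from labels (the real M2M-100 helper wraps the last non-pad
# token instead of taking a fixed start id; this version is illustrative only):
import torch

def shift_right(labels: torch.Tensor, pad_id: int, start_id: int) -> torch.Tensor:
    shifted = labels.new_full(labels.shape, pad_id)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0] = start_id
    return shifted

_labels = torch.tensor([[7, 8, 9, 2]])  # 2 = eos
assert shift_right(_labels, pad_id=1, start_id=2).tolist() == [[2, 7, 8, 9]]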
| 280 |
import colorsys
from PIL import Image # type: ignore
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
__A : List[str] = x
__A : str = y
for step in range(a ): # noqa: B007
__A : Union[str, Any] = a * a - b * b + x
__A : Optional[int] = 2 * a * b + y
__A : List[str] = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
        return (255, 255, 255)
def _SCREAMING_SNAKE_CASE ( a ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(a , 1 , 1 ) )
def _SCREAMING_SNAKE_CASE ( a = 800 , a = 600 , a = -0.6 , a = 0 , a = 3.2 , a = 50 , a = True , ) -> Image.Image:
__A : str = Image.new('RGB' , (image_width, image_height) )
__A : Dict = img.load()
# loop through the image-coordinates
for image_x in range(a ):
for image_y in range(a ):
# determine the figure-coordinates based on the image-coordinates
__A : Dict = figure_width / image_width * image_height
__A : Union[str, Any] = figure_center_x + (image_x / image_width - 0.5) * figure_width
__A : Optional[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
__A : Union[str, Any] = get_distance(a , a , a )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__A : Optional[Any] = get_color_coded_rgb(a )
else:
__A : Dict = get_black_and_white_rgb(a )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
UpperCAmelCase : str = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
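# Side note: the escape-time computation above, restated as a self-contained
# function (the tuple assignment reproduces the a_new/b update order exactly):
def escape_fraction(x: float, y: float, max_step: int) -> float:
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

assert escape_fraction(0.0, 0.0, 50) == 1.0  # the origin never escapes
assert escape_fraction(2.0, 2.0, 50) == 0.0  # diverges on the very first step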
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase : Union[str, Any] = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[int] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
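# Side note: _LazyModule defers the heavy submodule imports until an attribute
# is first accessed. The same effect can be sketched in a plain module with
# PEP 562's module-level __getattr__ (simplified; not the actual _LazyModule):
import importlib

_lazy_import_structure = {'math': ['sqrt'], 'json': ['dumps']}

def __getattr__(name):
    # only runs when `name` is not found normally; imports on first use
    for _module_name, _symbols in _lazy_import_structure.items():
        if name in _symbols:
            return getattr(importlib.import_module(_module_name), name)
    raise AttributeError(name)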
| 280 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> float:
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def _SCREAMING_SNAKE_CASE ( a , a , a , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _SCREAMING_SNAKE_CASE ( a , a , a , ) -> float:
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return compound_interest(
a , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
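# Side note: a worked example of the three formulas above, restated inline
# (the arithmetic is spelled out rather than calling the functions):
principal, daily_rate, days = 10_000.0, 0.0005, 30
simple = principal * daily_rate * days  # simple_interest: 150.0
# note: despite "percentage" in the parameter name, compound_interest uses the
# rate directly, so 0.05 means 5% per compounding period
compound = principal * ((1 + 0.05) ** 10 - 1)  # ten periods at 5%
assert round(simple, 2) == 150.0 and round(compound, 2) == 6288.95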
| 280 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = inspect.getfile(accelerate.test_utils )
__A : Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
__A : Dict = test_metrics
@require_cpu
def UpperCAmelCase_ ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCAmelCase_ ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCAmelCase_ ( self ):
self.test_metrics.main()
@require_multi_gpu
def UpperCAmelCase_ ( self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
__A : Any = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
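# Side note: a simplified sketch of what a patch_environment-style context
# manager does -- set env vars for the duration of a block, then restore them
# (illustrative only, not accelerate's implementation):
import os
from contextlib import contextmanager

@contextmanager
def patch_env(**kwargs):
    _old = {k.upper(): os.environ.get(k.upper()) for k in kwargs}
    os.environ.update({k.upper(): str(v) for k, v in kwargs.items()})
    try:
        yield
    finally:
        for _key, _value in _old.items():
            if _value is None:
                os.environ.pop(_key, None)
            else:
                os.environ[_key] = _value

with patch_env(omp_num_threads=1):
    assert os.environ['OMP_NUM_THREADS'] == '1'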
| 280 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase : Any = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : int = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Dict = '''switch_transformers'''
UpperCamelCase : Dict = ['''past_key_values''']
UpperCamelCase : Any = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , _A=32128 , _A=768 , _A=64 , _A=2048 , _A=64 , _A=12 , _A=3 , _A=12 , _A=3 , _A=12 , _A=8 , _A=False , _A=0.0_1 , _A="float32" , _A=False , _A=32 , _A=128 , _A=0.1 , _A=1e-6 , _A=0.0_0_1 , _A=0.0_0_1 , _A=1.0 , _A="relu" , _A=True , _A=False , _A=True , _A=0 , _A=1 , **_A , ):
__A : Dict = vocab_size
__A : Any = d_model
__A : Dict = d_kv
__A : str = d_ff
__A : Optional[Any] = num_sparse_encoder_layers
__A : str = num_layers
__A : Tuple = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__A : List[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__A : Union[str, Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__A : Optional[Any] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__A : Optional[Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__A : Union[str, Any] = self.num_decoder_layers # HACK: this will create 0 sparse layers
__A : int = num_heads
__A : List[str] = num_experts
__A : Optional[Any] = expert_capacity
__A : Optional[int] = router_bias
__A : Dict = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
__A : Any = router_dtype
__A : str = router_ignore_padding_tokens
__A : List[str] = relative_attention_num_buckets
__A : List[str] = relative_attention_max_distance
__A : List[str] = dropout_rate
__A : int = layer_norm_epsilon
__A : Optional[Any] = initializer_factor
__A : Union[str, Any] = feed_forward_proj
__A : Optional[int] = use_cache
__A : int = add_router_probs
__A : Optional[int] = router_z_loss_coef
__A : Optional[int] = router_aux_loss_coef
__A : List[str] = self.feed_forward_proj.split('-' )
__A : Any = act_info[-1]
__A : int = act_info[0] == 'gated'
if len(_A ) > 1 and act_info[0] != "gated" or len(_A ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__A : Optional[int] = 'gelu_new'
super().__init__(
pad_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , **_A , )
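# Side note: the sparse-step arithmetic above, worked through for the default
# 12-layer encoder with 3 sparse layers:
num_layers, num_sparse = 12, 3
encoder_sparse_step = num_layers // num_sparse if num_sparse > 0 else num_layers
assert encoder_sparse_step == 4  # one in every four encoder layers is sparse
# (exactly which layers become sparse MoE layers is decided in the modeling code)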
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> bool:
return str(a ) == str(a )[::-1]
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return int(a ) + int(str(a )[::-1] )
def _SCREAMING_SNAKE_CASE ( a = 10_000 ) -> int:
__A : int = []
for num in range(1 , a ):
__A : List[str] = 0
__A : List[Any] = num
while iterations < 50:
__A : str = sum_reverse(a )
iterations += 1
if is_palindrome(a ):
break
else:
lychrel_nums.append(a )
return len(a )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 280 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple:
__A : Dict = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__A : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__A : Optional[int] = 4
__A : List[Any] = 48
__A : Optional[Any] = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__A : Any = [6, 6, 6, 6]
__A : List[Any] = 60
__A : Union[str, Any] = [6, 6, 6, 6]
__A : str = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__A : List[str] = 4
__A : Union[str, Any] = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__A : Any = 1
__A : List[str] = 1
__A : Optional[Any] = 1_26
__A : Dict = 7
__A : int = 255.0
__A : Dict = ''
return config
def _SCREAMING_SNAKE_CASE ( a , a ) -> Dict:
if "patch_embed.proj" in name and "layers" not in name:
__A : Optional[int] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__A : Any = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__A : Any = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__A : Optional[int] = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__A : Tuple = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__A : Optional[int] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__A : List[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__A : Optional[int] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__A : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__A : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__A : Optional[int] = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__A : List[Any] = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__A : Any = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__A : List[str] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__A : Union[str, Any] = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__A : Optional[int] = 'layernorm.weight'
if name == "norm.bias":
__A : Optional[Any] = 'layernorm.bias'
if "conv_first" in name:
__A : int = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__A : Union[str, Any] = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__A : Any = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__A : Optional[int] = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__A : Any = name.replace('upsample.2' , 'upsample.convolution_1' )
__A : List[str] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__A : Optional[int] = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__A : List[Any] = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__A : Optional[Any] = 'swin2sr.' + name
return name
def _SCREAMING_SNAKE_CASE ( a , a ) -> int:
for key in orig_state_dict.copy().keys():
__A : List[str] = orig_state_dict.pop(a )
if "qkv" in key:
__A : List[Any] = key.split('.' )
__A : Optional[Any] = int(key_split[1] )
__A : Union[str, Any] = int(key_split[4] )
__A : Union[str, Any] = config.embed_dim
if "weight" in key:
__A : List[str] = val[:dim, :]
__A : Tuple = val[dim : dim * 2, :]
__A : Optional[Any] = val[-dim:, :]
else:
__A : Union[str, Any] = val[:dim]
__A : Optional[int] = val[dim : dim * 2]
__A : str = val[-dim:]
pass
else:
__A : List[str] = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Any:
__A : Optional[int] = get_config(a )
__A : str = SwinaSRForImageSuperResolution(a )
model.eval()
__A : Optional[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )
__A : Any = convert_state_dict(a , a )
__A , __A : List[Any] = model.load_state_dict(a , strict=a )
if len(a ) > 0:
raise ValueError('Missing keys when converting: {}'.format(a ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"""Unexpected key {key} in state_dict""" )
# verify values
__A : Union[str, Any] = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__A : Dict = Image.open(requests.get(a , stream=a ).raw ).convert('RGB' )
__A : Tuple = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__A : Any = 1_26 if 'Jpeg' in checkpoint_url else 2_56
__A : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__A : Tuple = transforms(a ).unsqueeze(0 )
if config.num_channels == 1:
__A : Union[str, Any] = pixel_values[:, 0, :, :].unsqueeze(1 )
__A : str = model(a )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__A : List[Any] = torch.Size([1, 3, 5_12, 5_12] )
__A : Optional[int] = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__A : List[str] = torch.Size([1, 3, 10_24, 10_24] )
__A : Optional[int] = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__A : Union[str, Any] = torch.Size([1, 3, 10_24, 10_24] )
__A : Optional[int] = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__A : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] )
__A : Dict = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__A : Optional[int] = torch.Size([1, 3, 10_24, 10_24] )
__A : Any = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , a , atol=1e-3 )
print('Looks ok!' )
__A : Optional[Any] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__A : List[Any] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(a )
if push_to_hub:
model.push_to_hub(F"""caidas/{model_name}""" )
processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
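# Side note: the fused-qkv split performed in convert_state_dict, shown on a
# toy tensor (dim plays the role of config.embed_dim):
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)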
| 280 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _A:
"""simple docstring"""
def __init__( self , _A = None ):
if components is None:
__A : int = []
__A : Tuple = list(_A )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(_A , self.__components ) ) + ")"
def __add__( self , _A ):
__A : Optional[int] = len(self )
if size == len(_A ):
__A : Any = [self.__components[i] + other.component(_A ) for i in range(_A )]
return Vector(_A )
else:
raise Exception('must have the same size' )
def __sub__( self , _A ):
__A : Tuple = len(self )
if size == len(_A ):
__A : Union[str, Any] = [self.__components[i] - other.component(_A ) for i in range(_A )]
return Vector(_A )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , (float, int) ):
__A : str = [c * other for c in self.__components]
return Vector(_A )
elif isinstance(_A , _A ) and len(self ) == len(_A ):
__A : Union[str, Any] = len(self )
__A : Dict = [self.__components[i] * other.component(_A ) for i in range(_A )]
return sum(_A )
else: # error case
raise Exception('invalid operand!' )
def UpperCAmelCase_ ( self ):
return Vector(self.__components )
def UpperCAmelCase_ ( self , _A ):
        if isinstance(_A , int ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCAmelCase_ ( self , _A , _A ):
assert -len(self.__components ) <= pos < len(self.__components )
__A : Optional[int] = value
def UpperCAmelCase_ ( self ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__A : Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(_A ) )
def UpperCAmelCase_ ( self , _A , _A = False ):
__A : Optional[Any] = self * other
__A : Optional[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _SCREAMING_SNAKE_CASE ( a ) -> Vector:
assert isinstance(a , a )
return Vector([0] * dimension )
def _SCREAMING_SNAKE_CASE ( a , a ) -> Vector:
assert isinstance(a , a ) and (isinstance(a , a ))
__A : Optional[Any] = [0] * dimension
__A : Tuple = 1
return Vector(a )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
assert (
isinstance(a , a )
and isinstance(a , a )
and (isinstance(a , (int, float) ))
)
return x * scalar + y
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector:
random.seed(a )
__A : str = [random.randint(a , a ) for _ in range(a )]
return Vector(a )
class _A:
"""simple docstring"""
def __init__( self , _A , _A , _A ):
__A : Optional[Any] = matrix
__A : Dict = w
__A : Optional[int] = h
def __str__( self ):
__A : Tuple = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Optional[Any] = []
for i in range(self.__height ):
__A : Optional[Any] = [
self.__matrix[i][j] + other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self , _A ):
if self.__width == other.width() and self.__height == other.height():
__A : Tuple = []
for i in range(self.__height ):
__A : str = [
self.__matrix[i][j] - other.component(_A , _A )
for j in range(self.__width )
]
matrix.append(_A )
return Matrix(_A , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , _A ):
...
@overload
def __mul__( self , _A ):
...
def __mul__( self , _A ):
if isinstance(_A , _A ): # matrix-vector
if len(_A ) == self.__width:
__A : List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
__A : List[str] = [
self.__matrix[i][j] * other.component(_A )
for j in range(self.__width )
]
ans.change_component(_A , sum(_A ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_A , (int, float) ): # matrix-scalar
__A : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_A , self.__width , self.__height )
return None
def UpperCAmelCase_ ( self ):
return self.__height
def UpperCAmelCase_ ( self ):
return self.__width
def UpperCAmelCase_ ( self , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A , _A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
__A : int = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__A : List[str] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_A ) ):
__A : Optional[int] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_A , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCAmelCase_ ( self , _A , _A ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_A , _A )
else:
raise Exception('Indices out of bounds' )
def UpperCAmelCase_ ( self ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__A : List[str] = [
self.__matrix[0][y] * self.cofactor(0 , _A ) for y in range(self.__width )
]
return sum(_A )
def _SCREAMING_SNAKE_CASE ( a ) -> Matrix:
__A : list[list[float]] = [[0] * n for _ in range(a )]
return Matrix(a , a , a )
def _SCREAMING_SNAKE_CASE ( a , a , a , a ) -> Matrix:
random.seed(a )
__A : list[list[float]] = [
[random.randint(a , a ) for _ in range(a )] for _ in range(a )
]
return Matrix(a , a , a )
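# Side note: what Vector.__mul__ (the dot product) and the angle method compute,
# written out for two small vectors:
import math

u, v = [1.0, 0.0], [1.0, 1.0]
dot = sum(a * b for a, b in zip(u, v))  # 1.0
angle = math.acos(dot / (math.hypot(*u) * math.hypot(*v)))
assert abs(math.degrees(angle) - 45.0) < 1e-9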
| 280 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class _A( snake_case__ , snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = '''maskformer-swin'''
UpperCamelCase : Optional[Any] = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _A=224 , _A=4 , _A=3 , _A=96 , _A=[2, 2, 6, 2] , _A=[3, 6, 12, 24] , _A=7 , _A=4.0 , _A=True , _A=0.0 , _A=0.0 , _A=0.1 , _A="gelu" , _A=False , _A=0.0_2 , _A=1e-5 , _A=None , _A=None , **_A , ):
super().__init__(**_A )
__A : Any = image_size
__A : str = patch_size
__A : Tuple = num_channels
__A : Optional[Any] = embed_dim
__A : str = depths
__A : List[str] = len(_A )
__A : Any = num_heads
__A : Optional[int] = window_size
__A : Union[str, Any] = mlp_ratio
__A : Tuple = qkv_bias
__A : int = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Any = drop_path_rate
__A : List[str] = hidden_act
__A : Any = use_absolute_embeddings
__A : Optional[Any] = layer_norm_eps
__A : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__A : Optional[Any] = int(embed_dim * 2 ** (len(_A ) - 1) )
__A : int = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(_A ) + 1 )]
__A , __A : Dict = get_aligned_output_features_output_indices(
out_features=_A , out_indices=_A , stage_names=self.stage_names )
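# Side note: the hidden_size derivation above, evaluated with the defaults:
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768  # channel dimension after the last of the four stages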
| 280 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : List[str] = '''▁'''
UpperCAmelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = BertGenerationTokenizer
UpperCamelCase : str = False
UpperCamelCase : Tuple = True
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Tuple = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : str = '<s>'
__A : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCAmelCase_ ( self ):
__A : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(_A ) , 1002 )
def UpperCAmelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCAmelCase_ ( self ):
__A : str = BertGenerationTokenizer(_A , keep_accents=_A )
__A : Dict = tokenizer.tokenize('This is a test' )
self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
__A : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__A : Dict = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__A : Optional[int] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase_ ( self ):
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'Hello World!'
__A : Optional[Any] = [18536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCAmelCase_ ( self ):
__A : Dict = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
__A : int = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCAmelCase_ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__A : Tuple = list(self.big_tokenizer.get_vocab().keys() )[:10]
__A : List[Any] = ' '.join(_A )
__A : Union[str, Any] = self.big_tokenizer.encode_plus(_A , return_tensors='pt' , return_token_type_ids=_A )
__A : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=_A )
__A : int = BertGenerationConfig()
__A : List[str] = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCAmelCase_ ( self ):
# fmt: off
__A : str = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 280 | 1 |
from heapq import heappop, heappush
import numpy as np
def _SCREAMING_SNAKE_CASE ( a , a , a , a , ) -> tuple[float | int, list[tuple[int, int]]]:
__A , __A : int = grid.shape
__A : Any = [-1, 1, 0, 0]
__A : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__A , __A : Optional[int] = [(0, source)], set()
__A : Any = np.full((rows, cols) , np.inf )
__A : Any = 0
__A : Any = np.empty((rows, cols) , dtype=a )
__A : Optional[Any] = None
while queue:
((__A) , (__A)) : List[str] = heappop(a )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__A : int = []
while (x, y) != source:
path.append((x, y) )
__A , __A : Optional[int] = predecessors[x, y]
path.append(a ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(a ) ):
__A , __A : Union[str, Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__A : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(a , (dist + 1, (nx, ny)) )
__A : List[Any] = dist + 1
__A : Union[str, Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
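# Minimal usage sketch of the search above (the function name is obfuscated):
# on grid = np.array([[1, 1], [1, 1]]), going from (0, 0) to (1, 1) without
# diagonals returns distance 2.0 and the path [(0, 0), (0, 1), (1, 1)].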
| 280 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _A:
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *_A , **_A ):
pass
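# The helper below shortens an image to the first ten hex characters of its
# byte digest so the mask assertions further down can compare outputs compactly.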
def _SCREAMING_SNAKE_CASE ( a ) -> str:
__A : str = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : Dict = np.array(a )
__A : List[Any] = npimg.shape
return {"hash": hashimage(a ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase : int = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Dict = MaskGenerationPipeline(model=_A , image_processor=_A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase_ ( self , _A , _A ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCAmelCase_ ( self ):
pass
@slow
@require_torch
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
__A : List[str] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
__A : List[Any] = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = 'facebook/sam-vit-huge'
__A : List[str] = pipeline('mask-generation' , model=_A )
__A : Tuple = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__A : List[str] = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase : Optional[int] = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : List[Any] = tempfile.mkdtemp()
# fmt: off
__A : List[str] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Union[str, Any] = dict(zip(_A , range(len(_A ) ) ) )
__A : Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : int = {'unk_token': '<unk>'}
__A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : List[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : Optional[int] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.get_tokenizer()
__A : str = self.get_rust_tokenizer()
__A : List[str] = self.get_image_processor()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : Optional[int] = self.get_image_processor(do_normalize=_A )
__A : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = self.prepare_image_inputs()
__A : int = image_processor(_A , return_tensors='np' )
__A : str = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : str = self.get_image_processor()
__A : str = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : str = 'lower newer'
__A : str = processor(text=_A , return_tensors='np' )
__A : List[str] = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : int = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : List[str] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = 'lower newer'
__A : Optional[Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Any = 'google/owlvit-base-patch32'
__A : int = OwlViTProcessor.from_pretrained(_A )
__A : Dict = ['cat', 'nasa badge']
__A : Optional[Any] = processor(text=_A )
__A : Optional[int] = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : Dict = [['cat', 'nasa badge'], ['person']]
__A : Dict = processor(text=_A )
__A : Optional[int] = 16
__A : Any = len(_A )
__A : Union[str, Any] = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Union[str, Any] = ['cat', 'nasa badge']
__A : Tuple = processor(text=_A )
__A : str = 16
__A : int = inputs['input_ids']
__A : List[Any] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Optional[Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = self.prepare_image_inputs()
__A : Optional[int] = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : str = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Tuple = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
| 280 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCAmelCase : int = logging.get_logger(__name__)
class _A:
"""simple docstring"""
UpperCamelCase : str
UpperCamelCase : str = None
@staticmethod
def UpperCAmelCase_ ( ):
raise NotImplementedError
def UpperCAmelCase_ ( self , _A , _A , _A , **_A ):
raise NotImplementedError
def UpperCAmelCase_ ( self , _A ):
raise NotImplementedError
def UpperCAmelCase_ ( self ):
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def UpperCAmelCase_ ( cls ):
return F"""`pip install {cls.pip_package or cls.name}`"""
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = '''optuna'''
@staticmethod
def UpperCAmelCase_ ( ):
return is_optuna_available()
def UpperCAmelCase_ ( self , _A , _A , _A , **_A ):
return run_hp_search_optuna(_A , _A , _A , **_A )
def UpperCAmelCase_ ( self , _A ):
return default_hp_space_optuna(_A )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[int] = '''ray'''
UpperCamelCase : Optional[int] = '''\'ray[tune]\''''
@staticmethod
def UpperCAmelCase_ ( ):
return is_ray_available()
def UpperCAmelCase_ ( self , _A , _A , _A , **_A ):
return run_hp_search_ray(_A , _A , _A , **_A )
def UpperCAmelCase_ ( self , _A ):
return default_hp_space_ray(_A )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = '''sigopt'''
@staticmethod
def UpperCAmelCase_ ( ):
return is_sigopt_available()
def UpperCAmelCase_ ( self , _A , _A , _A , **_A ):
return run_hp_search_sigopt(_A , _A , _A , **_A )
def UpperCAmelCase_ ( self , _A ):
return default_hp_space_sigopt(_A )
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = '''wandb'''
@staticmethod
def UpperCAmelCase_ ( ):
return is_wandb_available()
def UpperCAmelCase_ ( self , _A , _A , _A , **_A ):
return run_hp_search_wandb(_A , _A , _A , **_A )
def UpperCAmelCase_ ( self , _A ):
return default_hp_space_wandb(_A )
UpperCAmelCase : Union[str, Any] = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def _SCREAMING_SNAKE_CASE ( ) -> str:
__A : List[str] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(a ) > 0:
__A : Optional[Any] = available_backends[0].name
if len(a ) > 1:
logger.info(
F"""{len(a )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
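# Usage sketch: the selector above returns the name of the first installed
# backend (e.g. "optuna" when only optuna is available), logs a note when
# several are installed, and raises RuntimeError with install hints otherwise.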
| 280 |
import math
def _SCREAMING_SNAKE_CASE ( a ) -> list[int]:
__A : List[str] = []
__A : Any = 2
__A : Union[str, Any] = int(math.sqrt(a ) ) # Size of every segment
__A : Any = [True] * (end + 1)
__A : List[Any] = []
while start <= end:
if temp[start] is True:
in_prime.append(a )
for i in range(start * start , end + 1 , a ):
__A : Optional[int] = False
start += 1
prime += in_prime
__A : Any = end + 1
__A : Any = min(2 * end , a )
while low <= n:
__A : List[Any] = [True] * (high - low + 1)
for each in in_prime:
__A : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(a , high + 1 , a ):
__A : Optional[int] = False
for j in range(len(a ) ):
if temp[j] is True:
prime.append(j + low )
__A : Optional[int] = high + 1
__A : Tuple = min(high + end , a )
return prime
print(sieve(10**6))
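# How the segmentation works: primes up to sqrt(n) are sieved first, then used
# to mark composites inside windows [low, high] of width ~sqrt(n), keeping
# memory at O(sqrt(n)). E.g. for n = 30 the first window is [6, 10], from
# which only 7 survives.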
| 280 | 1 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> int:
__A : int = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> List[Any]:
__A : str = 0
while b > 0:
if b & 1:
__A : str = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
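# Worked example of the peasant multiplication above: for a=7, b=6 the loop
# halves b as 6 -> 3 -> 1 while doubling a as 7 -> 14 -> 28, adding a whenever
# the low bit of b is set: 14 + 28 = 42. The second helper accumulates the
# same sum modulo c.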
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase : Any = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Dict = ['''pixel_values''']
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BICUBIC , _A = True , _A = None , _A = True , _A = 1 / 255 , _A = True , _A = None , _A = None , _A = True , **_A , ):
super().__init__(**_A )
__A : Tuple = size if size is not None else {'shortest_edge': 224}
__A : Optional[Any] = get_size_dict(_A , default_to_square=_A )
__A : Optional[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__A : Any = get_size_dict(_A , default_to_square=_A , param_name='crop_size' )
__A : Any = do_resize
__A : Dict = size
__A : Optional[int] = resample
__A : Union[str, Any] = do_center_crop
__A : Optional[Any] = crop_size
__A : int = do_rescale
__A : Tuple = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__A : Any = image_std if image_std is not None else OPENAI_CLIP_STD
__A : Union[str, Any] = do_convert_rgb
def UpperCAmelCase_ ( self , _A , _A , _A = PILImageResampling.BICUBIC , _A = None , **_A , ):
__A : List[str] = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Union[str, Any] = get_resize_output_image_size(_A , size=size['shortest_edge'] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A , _A = None , **_A , ):
__A : Optional[int] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A , _A = None , **_A , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A , _A , _A = None , **_A , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
__A : Dict = do_resize if do_resize is not None else self.do_resize
__A : Union[str, Any] = size if size is not None else self.size
__A : int = get_size_dict(_A , param_name='size' , default_to_square=_A )
__A : Dict = resample if resample is not None else self.resample
__A : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : Any = crop_size if crop_size is not None else self.crop_size
__A : Optional[int] = get_size_dict(_A , param_name='crop_size' , default_to_square=_A )
__A : Dict = do_rescale if do_rescale is not None else self.do_rescale
__A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
__A : Any = image_mean if image_mean is not None else self.image_mean
__A : List[Any] = image_std if image_std is not None else self.image_std
__A : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__A : Any = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__A : Union[str, Any] = [convert_to_rgb(_A ) for image in images]
# All transformations expect numpy arrays.
__A : Any = [to_numpy_array(_A ) for image in images]
if do_resize:
__A : int = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
__A : str = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
__A : int = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__A : List[str] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__A : Union[str, Any] = [to_channel_dimension_format(_A , _A ) for image in images]
__A : List[Any] = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A )
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple:
__A , __A : Optional[Any] = [], []
while len(a ) > 1:
__A , __A : Any = min(a ), max(a )
start.append(a )
end.append(a )
collection.remove(a )
collection.remove(a )
end.reverse()
return start + collection + end
if __name__ == "__main__":
UpperCAmelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase : Dict = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
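# Trace of the min-max peeling above: [5, 1, 4, 2, 3] collects start=[1, 2]
# and end=[5, 4]; reversing end and concatenating with the leftover [3]
# yields [1, 2, 3, 4, 5].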
| 280 | 1 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase : List[str] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Any:
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
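# Worked example: a pixel box (10, 20, 30, 40) on a 100x200 image maps to
# [100, 100, 300, 200] on the 0-1000 coordinate scale LayoutLM expects.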
def _SCREAMING_SNAKE_CASE ( a , a , a = None ) -> Optional[int]:
__A : Tuple = tesseract_config if tesseract_config is not None else ''
# apply OCR
__A : Optional[Any] = to_pil_image(a )
__A , __A : Dict = pil_image.size
__A : Dict = pytesseract.image_to_data(a , lang=a , output_type='dict' , config=a )
__A , __A , __A , __A , __A : List[str] = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
__A : Any = [idx for idx, word in enumerate(a ) if not word.strip()]
__A : Optional[int] = [word for idx, word in enumerate(a ) if idx not in irrelevant_indices]
__A : List[Any] = [coord for idx, coord in enumerate(a ) if idx not in irrelevant_indices]
__A : Union[str, Any] = [coord for idx, coord in enumerate(a ) if idx not in irrelevant_indices]
__A : List[Any] = [coord for idx, coord in enumerate(a ) if idx not in irrelevant_indices]
__A : Optional[Any] = [coord for idx, coord in enumerate(a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__A : int = []
for x, y, w, h in zip(a , a , a , a ):
__A : str = [x, y, x + w, y + h]
actual_boxes.append(a )
# finally, normalize the bounding boxes
__A : List[Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(a , a , a ) )
assert len(a ) == len(a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = ['''pixel_values''']
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = None , _A = "" , **_A , ):
super().__init__(**_A )
__A : int = size if size is not None else {'height': 224, 'width': 224}
__A : List[str] = get_size_dict(_A )
__A : Optional[int] = do_resize
__A : Dict = size
__A : int = resample
__A : Optional[int] = apply_ocr
__A : List[str] = ocr_lang
__A : Union[str, Any] = tesseract_config
def UpperCAmelCase_ ( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
__A : List[str] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__A : Any = (size['height'], size['width'])
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
__A : Tuple = do_resize if do_resize is not None else self.do_resize
__A : int = size if size is not None else self.size
__A : Any = get_size_dict(_A )
__A : Union[str, Any] = resample if resample is not None else self.resample
__A : List[str] = apply_ocr if apply_ocr is not None else self.apply_ocr
__A : Dict = ocr_lang if ocr_lang is not None else self.ocr_lang
__A : str = tesseract_config if tesseract_config is not None else self.tesseract_config
__A : Any = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
__A : Any = [to_numpy_array(_A ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
__A : Any = []
__A : List[Any] = []
for image in images:
__A , __A : Dict = apply_tesseract(_A , _A , _A )
words_batch.append(_A )
boxes_batch.append(_A )
if do_resize:
__A : int = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
__A : Tuple = [flip_channel_order(_A ) for image in images]
__A : Dict = [to_channel_dimension_format(_A , _A ) for image in images]
__A : Union[str, Any] = BatchFeature(data={'pixel_values': images} , tensor_type=_A )
if apply_ocr:
__A : Dict = words_batch
__A : Any = boxes_batch
return data
| 280 |
def _SCREAMING_SNAKE_CASE ( a , a = 0 ) -> list:
__A : int = length or len(a )
__A : str = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
__A , __A : Optional[int] = list_data[i + 1], list_data[i]
__A : Union[str, Any] = True
return list_data if not swapped else bubble_sort(a , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
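# Example of the recursive pass above: [3, 1, 2] swaps to [1, 2, 3] in one
# pass (swapped=True), then recurses with length - 1 and returns as soon as a
# pass performs no swap.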
| 280 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
UpperCAmelCase : Any = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
UpperCAmelCase : Union[str, Any] = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
UpperCAmelCase : List[Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _SCREAMING_SNAKE_CASE ( a , a ) -> Tuple:
return float((preds == labels).mean() )
def _SCREAMING_SNAKE_CASE ( a , a ) -> int:
__A : Optional[int] = simple_accuracy(a , a )
__A : Tuple = float(fa_score(y_true=a , y_pred=a ) )
return {
"accuracy": acc,
"f1": fa,
}
def _SCREAMING_SNAKE_CASE ( a , a ) -> List[Any]:
__A : Optional[Any] = float(pearsonr(a , a )[0] )
__A : Tuple = float(spearmanr(a , a )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def UpperCAmelCase_ ( self , _A , _A ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_A , _A )}
elif self.config_name == "stsb":
return pearson_and_spearman(_A , _A )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_A , _A )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_A , _A )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 280 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( a ) -> int:
if not nums:
return 0
__A : Optional[int] = nums[0]
__A : str = 0
for num in nums[1:]:
__A , __A : Tuple = (
max_excluding + num,
max(a , a ),
)
return max(a , a )
if __name__ == "__main__":
import doctest
doctest.testmod()
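# Worked example: for nums = [2, 7, 9, 3, 1] the (including, excluding) pair
# evolves (2, 0) -> (7, 2) -> (11, 7) -> (10, 11) -> (12, 11), so the best
# sum of non-adjacent elements is 12 (2 + 9 + 1).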
| 280 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCAmelCase : Optional[int] = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : Optional[int] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A = None , _A = None , _A = False , _A = False , _A = None , _A = None , **_A , ):
super().__init__(
features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , )
__A : str = Generator(
cache_dir=_A , features=_A , generator=_A , gen_kwargs=_A , **_A , )
def UpperCAmelCase_ ( self ):
# Build iterable dataset
if self.streaming:
__A : Optional[Any] = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
__A : Any = None
__A : Optional[int] = None
__A : int = None
__A : str = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , )
__A : Tuple = self.builder.as_dataset(
split='train' , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
| 280 |
def _SCREAMING_SNAKE_CASE ( a ) -> str:
if number > 0:
raise ValueError('input must be a negative integer' )
__A : Optional[int] = len(bin(a )[3:] )
__A : Dict = bin(abs(a ) - (1 << binary_number_length) )[3:]
__A : int = (
(
'1'
+ '0' * (binary_number_length - len(a ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
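# Worked example: for -5, bin(5) needs 3 bits; 5 - (1 << 3) = -3 gives the
# magnitude bits '11', and padding with the sign bit yields '0b1011', which is
# -5 in 4-bit two's complement.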
| 280 | 1 |
def _SCREAMING_SNAKE_CASE ( a ) -> bool:
return str(a ) == str(a )[::-1]
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return int(a ) + int(str(a )[::-1] )
def _SCREAMING_SNAKE_CASE ( a = 1_00_00 ) -> int:
__A : int = []
for num in range(1 , a ):
__A : List[str] = 0
__A : List[Any] = num
while iterations < 50:
__A : str = sum_reverse(a )
iterations += 1
if is_palindrome(a ):
break
else:
lychrel_nums.append(a )
return len(a )
if __name__ == "__main__":
print(F"""{solution() = }""")
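# Background example: 47 + 74 = 121 becomes a palindrome after one
# reverse-and-add step, while a Lychrel candidate such as 196 never does
# within the 50 iterations checked above and is therefore counted.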
| 280 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> None:
__A : int = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(a ) == len(a ), F"""{len(a )} != {len(a )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
UpperCAmelCase : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
UpperCAmelCase : Optional[int] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _SCREAMING_SNAKE_CASE ( a , a ) -> Dict:
try:
__A : int = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(a ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
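# Worked example from the tables above: distilling a 12-layer teacher into a
# 3-layer student copies teacher layers [0, 6, 11]; the matching layers to
# supervise for that pair are [3, 7, 11].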
def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__A : List[str] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a , a ):
AutoTokenizer.from_pretrained(a ).save_pretrained(a ) # purely for convenience
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a ).eval()
else:
assert isinstance(a , a ), F"""teacher must be a model or string got type {type(a )}"""
__A : int = teacher.config.to_diff_dict()
try:
__A , __A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__A : str = teacher_e
if d is None:
__A : List[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__A , __A : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__A , __A : Optional[int] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__A : int = teacher_e
if d is None:
__A : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a )
# Copy weights
__A : Dict = teacher.config_class(**a )
__A : int = AutoModelForSeqaSeqLM.from_config(a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__A : Any = student.load_state_dict(teacher.state_dict() , strict=a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__A , __A : Optional[int] = list(range(a ) ), list(range(a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
if d_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
try:
if hasattr(
a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a )
copy_layers(teacher.decoder.block , student.decoder.block , a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__A : Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 280 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=32 , _A=2 , _A=3 , _A=16 , _A=[1, 2, 1] , _A=[2, 2, 4] , _A=2 , _A=2.0 , _A=True , _A=0.0 , _A=0.0 , _A=0.1 , _A="gelu" , _A=False , _A=True , _A=0.0_2 , _A=1e-5 , _A=True , _A=None , _A=True , _A=10 , _A=8 , ):
__A : Optional[Any] = parent
__A : Optional[Any] = batch_size
__A : Optional[Any] = image_size
__A : Optional[int] = patch_size
__A : Optional[int] = num_channels
__A : List[Any] = embed_dim
__A : Union[str, Any] = depths
__A : Tuple = num_heads
__A : Union[str, Any] = window_size
__A : str = mlp_ratio
__A : int = qkv_bias
__A : str = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : int = drop_path_rate
__A : Any = hidden_act
__A : List[str] = use_absolute_embeddings
__A : List[Any] = patch_norm
__A : Tuple = layer_norm_eps
__A : int = initializer_range
__A : Optional[int] = is_training
__A : List[str] = scope
__A : Union[str, Any] = use_labels
__A : Tuple = type_sequence_label_size
__A : Tuple = encoder_stride
def UpperCAmelCase_ ( self ):
__A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : Optional[Any] = None
if self.use_labels:
__A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Union[str, Any] = SwinvaModel(config=_A )
model.to(_A )
model.eval()
__A : int = model(_A )
__A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__A : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
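# With the tester defaults (image_size=32, patch_size=2, embed_dim=16,
# depths=[1, 2, 1]) this is (32 // 2) ** 2 // 4 ** 2 = 16 tokens, each of
# dimension 16 * 2 ** 2 = 64.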
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : str = SwinvaForMaskedImageModeling(config=_A )
model.to(_A )
model.eval()
__A : List[str] = model(_A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__A : List[Any] = 1
__A : Union[str, Any] = SwinvaForMaskedImageModeling(_A )
model.to(_A )
model.eval()
__A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A : List[str] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : List[Any] = self.type_sequence_label_size
__A : List[str] = SwinvaForImageClassification(_A )
model.to(_A )
model.eval()
__A : Optional[int] = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ):
__A : Any = self.prepare_config_and_inputs()
__A , __A , __A : List[Any] = config_and_inputs
__A : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Any = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase : Dict = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : str = False
UpperCamelCase : List[Any] = False
UpperCamelCase : Any = False
UpperCamelCase : List[str] = False
def UpperCAmelCase_ ( self ):
__A : List[Any] = SwinvaModelTester(self )
__A : str = ConfigTester(self , config_class=_A , embed_dim=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def UpperCAmelCase_ ( self ):
__A , __A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(_A )
__A : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Optional[Any] = [*signature.parameters.keys()]
__A : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self ):
__A , __A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Dict = True
for model_class in self.all_model_classes:
__A : Dict = True
__A : Optional[int] = False
__A : Union[str, Any] = True
__A : Any = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__A : Optional[Any] = model(**self._prepare_for_class(_A , _A ) )
__A : Any = outputs.attentions
__A : Union[str, Any] = len(self.model_tester.depths )
self.assertEqual(len(_A ) , _A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : Optional[Any] = True
__A : List[str] = config.window_size**2
__A : Optional[Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__A : Optional[Any] = model(**self._prepare_for_class(_A , _A ) )
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__A : int = len(_A )
# Check attention is always last and order is fine
__A : int = True
__A : Union[str, Any] = True
__A : Dict = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_A , _A ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
__A : Optional[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__A : int = 2
self.assertEqual(out_len + added_hidden_states , len(_A ) )
__A : Optional[int] = outputs.attentions
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCAmelCase_ ( self , _A , _A , _A , _A ):
__A : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__A : Optional[int] = model(**self._prepare_for_class(_A , _A ) )
__A : Optional[Any] = outputs.hidden_states
__A : Dict = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_A ) , _A )
# Swinv2 has a different seq_length
__A : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__A : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__A : str = outputs.reshaped_hidden_states
self.assertEqual(len(_A ) , _A )
__A , __A , __A , __A : Union[str, Any] = reshaped_hidden_states[0].shape
__A : Optional[Any] = (
reshaped_hidden_states[0].view(_A , _A , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase_ ( self ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__A : str = True
self.check_hidden_states_output(_A , _A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[Any] = True
self.check_hidden_states_output(_A , _A , _A , _A )
def UpperCAmelCase_ ( self ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Dict = 3
__A : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__A : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__A : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__A : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
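        # With the tester defaults (image_size=32, patch_size=2) the padded size
        # works out to 32 + 2 - (32 % 2) = 34, so the hidden-state shapes are
        # re-checked for an input larger than the configured image size.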
for model_class in self.all_model_classes:
__A : Optional[int] = True
self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : str = True
self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width) )
def UpperCAmelCase_ ( self ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def UpperCAmelCase_ ( self ):
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def UpperCAmelCase_ ( self ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Union[str, Any] = SwinvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCAmelCase_ ( self ):
__A , __A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : int = _config_zero_init(_A )
for model_class in self.all_model_classes:
__A : Union[str, Any] = model_class(config=_A )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ):
__A : List[Any] = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
_A )
__A : List[Any] = self.default_image_processor
__A : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__A : Union[str, Any] = image_processor(images=_A , return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
__A : Any = model(**_A )
# verify the logits
__A : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
__A : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
| 280 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> list[int]:
__A : Optional[int] = int(a )
# Initialize Result
__A : Optional[int] = []
    # Traverse the denominations from largest to smallest
    for denomination in reversed(a ):
        # Take as many coins of this denomination as possible
        while int(a ) >= int(a ):
            total_value -= int(a )
            answer.append(a ) # append this coin to the answer list
return answer
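# A worked example of the greedy strategy above, using the default Indian
# denominations [1, 2, 5, 10, 20, 50, 100, 500, 2000] and a value of 987:
# the loop picks 500, then 100 four times, then 50, 20, 10, 5 and 2, giving
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2] -- ten coins in total.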
# Driver Code
if __name__ == "__main__":
UpperCAmelCase : List[str] = []
UpperCAmelCase : Optional[int] = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
UpperCAmelCase : List[Any] = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
UpperCAmelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase : Optional[int] = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCAmelCase : Tuple = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
UpperCAmelCase : Optional[int] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 280 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
return getitem, k
def _SCREAMING_SNAKE_CASE ( a , a ) -> Union[str, Any]:
return setitem, k, v
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[int]:
return delitem, k
def _SCREAMING_SNAKE_CASE ( a , a , *a ) -> str:
try:
return fun(a , *a ), None
except Exception as e:
return None, e
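# For example, looking up a key that was never inserted does not raise here:
# _run_operation({}, *_get('missing')) evaluates to (None, KeyError('missing')),
# i.e. the harness captures the exception instead of letting it propagate.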
UpperCAmelCase : str = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
UpperCAmelCase : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
UpperCAmelCase : Union[str, Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
UpperCAmelCase : int = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
UpperCAmelCase : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
UpperCAmelCase : List[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : List[Any] = HashMap(initial_block_size=4 )
__A : Dict = {}
for _, (fun, *args) in enumerate(a ):
__A , __A : Optional[int] = _run_operation(a , a , *a )
__A , __A : Union[str, Any] = _run_operation(a , a , *a )
assert my_res == py_res
assert str(a ) == str(a )
assert set(a ) == set(a )
assert len(a ) == len(a )
assert set(my.items() ) == set(py.items() )
def _SCREAMING_SNAKE_CASE ( ) -> str:
def is_public(a ) -> bool:
return not name.startswith('_' )
__A : Optional[int] = {name for name in dir({} ) if is_public(a )}
__A : List[Any] = {name for name in dir(HashMap() ) if is_public(a )}
assert dict_public_names > hash_public_names
| 280 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : List[Any] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__A : Union[str, Any] = parent
__A : Optional[int] = batch_size
__A : int = num_channels
__A : int = min_resolution
__A : Any = max_resolution
__A : List[Any] = do_resize
__A : List[Any] = size
__A : Union[str, Any] = do_normalize
__A : Optional[int] = image_mean
__A : Optional[int] = image_std
__A : int = do_rescale
__A : str = rescale_factor
__A : Tuple = do_pad
def UpperCAmelCase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self , _A , _A=False ):
if not batched:
__A : List[str] = image_inputs[0]
if isinstance(_A , Image.Image ):
__A , __A : int = image.size
else:
__A , __A : Any = image.shape[1], image.shape[2]
if w < h:
__A : List[Any] = int(self.size['shortest_edge'] * h / w )
__A : List[Any] = self.size['shortest_edge']
elif w > h:
__A : Union[str, Any] = self.size['shortest_edge']
__A : str = int(self.size['shortest_edge'] * w / h )
else:
__A : Dict = self.size['shortest_edge']
__A : str = self.size['shortest_edge']
else:
__A : int = []
for image in image_inputs:
__A , __A : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : List[str] = max(_A , key=lambda _A : item[0] )[0]
__A : str = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
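# Example of the resize arithmetic above: a 30 x 40 (w x h) input with
# size {"shortest_edge": 18} maps the short side to 18 and scales the other,
# giving expected_width = 18 and expected_height = int(18 * 40 / 30) = 24.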
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : Dict = YolosImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _A )
__A : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _A )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
__A : str = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[int] = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
        # Initialize two image processors: one from the full dict and one with
        # resizing, normalization and rescaling disabled
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
__A : Any = self.image_processing_class(do_resize=_A , do_normalize=_A , do_rescale=_A )
# create random PyTorch tensors
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__A : Optional[int] = image_processing_a.pad(_A , return_tensors='pt' )
__A : Optional[int] = image_processing_a(_A , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image and target
__A : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__A : Optional[Any] = json.loads(f.read() )
__A : Optional[Any] = {'image_id': 39769, 'annotations': target}
# encode them
__A : str = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
__A : List[Any] = image_processing(images=_A , annotations=_A , return_tensors='pt' )
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify orig_size
__A : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image, target and masks_path
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__A : Tuple = json.loads(f.read() )
__A : Any = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
__A : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__A : Any = YolosImageProcessor(format='coco_panoptic' )
__A : List[Any] = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='pt' )
# verify pixel values
__A : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify masks
__A : Tuple = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _A )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
| 280 | 1 |
import math
def _SCREAMING_SNAKE_CASE ( ) -> None:
__A : Tuple = input('Enter message: ' )
__A : List[str] = int(input(F"""Enter key [2-{len(a ) - 1}]: """ ) )
__A : Any = input('Encryption/Decryption [e/d]: ' )
if mode.lower().startswith('e' ):
__A : str = encrypt_message(a , a )
elif mode.lower().startswith('d' ):
__A : int = decrypt_message(a , a )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : Optional[int] = [''] * key
for col in range(a ):
__A : Tuple = col
while pointer < len(a ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(a )
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : Dict = math.ceil(len(a ) / key )
__A : str = key
__A : int = (num_cols * num_rows) - len(a )
__A : Dict = [''] * num_cols
__A : str = 0
__A : Union[str, Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
__A : Optional[Any] = 0
row += 1
return "".join(a )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 280 |
import argparse
import json
from tqdm import tqdm
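# Assumed shape of the raw DPR file, inferred from the fields read below: a
# JSON list of records like
# {"question": "...", "positive_ctxs": [{"title": "Some Page", ...}, ...]}
# -- only "question" and each positive context's "title" are consumed.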
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=a , help='where to store parsed gold_data_path file' , )
__A : Optional[int] = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
__A : List[Any] = json.load(a )
for dpr_record in tqdm(a ):
__A : Dict = dpr_record['question']
__A : Any = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(a ) + '\n' )
if __name__ == "__main__":
main()
| 280 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = '''yolos'''
def __init__( self , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1e-1_2 , _A=[512, 864] , _A=16 , _A=3 , _A=True , _A=100 , _A=True , _A=False , _A=1 , _A=5 , _A=2 , _A=5 , _A=2 , _A=0.1 , **_A , ):
super().__init__(**_A )
__A : Optional[Any] = hidden_size
__A : List[Any] = num_hidden_layers
__A : Union[str, Any] = num_attention_heads
__A : List[Any] = intermediate_size
__A : Dict = hidden_act
__A : Any = hidden_dropout_prob
__A : List[str] = attention_probs_dropout_prob
__A : Tuple = initializer_range
__A : Tuple = layer_norm_eps
__A : Any = image_size
__A : Dict = patch_size
__A : Optional[int] = num_channels
__A : int = qkv_bias
__A : Dict = num_detection_tokens
__A : Optional[int] = use_mid_position_embeddings
__A : List[str] = auxiliary_loss
# Hungarian matcher
__A : Optional[Any] = class_cost
__A : str = bbox_cost
__A : Union[str, Any] = giou_cost
# Loss coefficients
__A : List[Any] = bbox_loss_coefficient
__A : Optional[Any] = giou_loss_coefficient
__A : List[str] = eos_coefficient
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase_ ( self ):
return 1e-4
@property
def UpperCAmelCase_ ( self ):
return 12
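    # In the OnnxConfig API the three properties above correspond, in order, to
    # the input spec with its dynamic axes, atol_for_validation (1e-4) and the
    # default ONNX opset (12); torch >= 1.11 is required for export.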
| 280 |
from heapq import heappop, heappush
import numpy as np
def _SCREAMING_SNAKE_CASE ( a , a , a , a , ) -> tuple[float | int, list[tuple[int, int]]]:
__A , __A : int = grid.shape
__A : Any = [-1, 1, 0, 0]
__A : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__A , __A : Optional[int] = [(0, source)], set()
__A : Any = np.full((rows, cols) , np.inf )
__A : Any = 0
__A : Any = np.empty((rows, cols) , dtype=a )
__A : Optional[Any] = None
while queue:
((__A) , (__A)) : List[str] = heappop(a )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__A : int = []
while (x, y) != source:
path.append((x, y) )
__A , __A : Optional[int] = predecessors[x, y]
path.append(a ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(a ) ):
__A , __A : Union[str, Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__A : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(a , (dist + 1, (nx, ny)) )
__A : List[Any] = dist + 1
__A : Union[str, Any] = (x, y)
return np.inf, []
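# A small worked example (1 = passable, 0 = blocked):
# grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
# with source (0, 0), destination (2, 0) and no diagonals, the only route
# hugs the right edge, so the function returns
# (6.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]).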
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : int = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[int] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[int] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
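# Note: _LazyModule defers the heavy framework imports declared above -- e.g.
# accessing transformers.models.bert.BertModel only triggers the torch-backed
# import at attribute-lookup time, keeping `import transformers` cheap.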
| 280 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def _SCREAMING_SNAKE_CASE ( a , a , a=8 ) -> Tuple:
__A : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__A : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
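# Example of the rounding above: downscale_height_and_width(768, 768, 8) gives
# 768 // 64 = 12 with no remainder, hence (96, 96); a non-divisible input such
# as (500, 500, 8) rounds up to 8 blocks per side and returns (64, 64).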
def _SCREAMING_SNAKE_CASE ( a , a=5_12 , a=5_12 ) -> int:
__A : Optional[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
__A : Union[str, Any] = np.array(pil_image.convert('RGB' ) )
__A : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
__A : int = np.transpose(a , [2, 0, 1] )
__A : Tuple = torch.from_numpy(a ).unsqueeze(0 )
return image
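# prepare_image (used in __call__ below) maps pixel values from [0, 255] to
# [-1, 1] -- 0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0 -- and returns an NCHW
# tensor of shape (1, 3, h, w).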
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A , _A , ):
super().__init__()
self.register_modules(
unet=_A , scheduler=_A , movq=_A , )
__A : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self , _A , _A , _A ):
# get the original timestep using init_timestep
__A : Optional[int] = min(int(num_inference_steps * strength ) , _A )
__A : Dict = max(num_inference_steps - init_timestep , 0 )
__A : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
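    # Example: with num_inference_steps = 100 and strength = 0.3 the slice
    # starts at t_start = 100 - min(30, 100) = 70, so only the final 30
    # scheduler timesteps are run; higher strength means more denoising.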
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A=None ):
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}""" )
__A : Union[str, Any] = image.to(device=_A , dtype=_A )
__A : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__A : int = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(_A , _A ):
__A : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
__A : str = torch.cat(_A , dim=0 )
else:
__A : List[str] = self.movq.encode(_A ).latent_dist.sample(_A )
__A : Tuple = self.movq.config.scaling_factor * init_latents
__A : Optional[int] = torch.cat([init_latents] , dim=0 )
__A : Union[str, Any] = init_latents.shape
__A : List[str] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
__A : Optional[Any] = self.scheduler.add_noise(_A , _A , _A )
__A : Optional[int] = init_latents
return latents
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__A : Optional[int] = torch.device(F"""cuda:{gpu_id}""" )
__A : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__A : List[Any] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__A : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
__A , __A : Optional[int] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
__A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
def __call__( self , _A , _A , _A , _A = 512 , _A = 512 , _A = 100 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ):
__A : List[Any] = self._execution_device
__A : Optional[Any] = guidance_scale > 1.0
if isinstance(_A , _A ):
__A : Optional[Any] = torch.cat(_A , dim=0 )
__A : Tuple = image_embeds.shape[0]
if isinstance(_A , _A ):
__A : List[Any] = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
__A : Union[str, Any] = image_embeds.repeat_interleave(_A , dim=0 )
__A : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
__A : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
__A : List[Any] = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
__A : Dict = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
__A : Any = image.to(dtype=image_embeds.dtype , device=_A )
__A : Tuple = self.movq.encode(_A )['latents']
__A : int = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
__A , __A : int = self.get_timesteps(_A , _A , _A )
__A : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
__A , __A : Any = downscale_height_and_width(_A , _A , self.movq_scale_factor )
__A : Tuple = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__A : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A : Dict = {'image_embeds': image_embeds}
__A : List[str] = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
__A , __A : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__A , __A : Optional[Any] = noise_pred.chunk(2 )
__A , __A : List[str] = variance_pred.chunk(2 )
__A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__A : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__A , __A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__A : List[str] = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
__A : List[Any] = self.movq.decode(_A , force_not_quantize=_A )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__A : List[str] = image * 0.5 + 0.5
__A : List[str] = image.clamp(0 , 1 )
__A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__A : Any = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 280 | 1 |