| code (string, 87 to 55.2k chars) | code_codestyle (int64, 0 to 349) | style_context (string, 135 to 49.1k chars) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 1 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle, current_row, current_row_idx, current_col_idx):
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
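# --- Added spot check (an illustration, not part of the original module) ---
# Both generators should agree on the classic first five rows.
assert generate_pascal_triangle(5) == [
    [1],
    [1, 1],
    [1, 2, 1],
    [1, 3, 3, 1],
    [1, 4, 6, 4, 1],
]
assert generate_pascal_triangle_optimized(5) == generate_pascal_triangle(5)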
| 292 | 0 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 2 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # once the lexicon fills a power of two, prepend "0" to every key
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
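# --- Added hand-traced check (an illustration, not part of the original file,
# which only exercises these helpers through file I/O) ---
# With the seed lexicon {"0": "0", "1": "1"}, decoding the single bit "0"
# emits "0"; the trailing unmatched bit of "01" never completes a code and
# is dropped.
assert decompress_data("0") == "0"
assert decompress_data("01") == "0"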
| 292 | 0 |
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions by brute force over every pair (O(n^2))."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer (O(n log n))."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists while counting inversions that cross the split."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
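# --- Added spot check (an illustration, not part of the original file) ---
# [3, 1, 2] has exactly two inversions: (3, 1) and (3, 2).
assert count_inversions_bf([3, 1, 2]) == 2
assert count_inversions_recursive([3, 1, 2])[1] == 2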
| 3 |
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap of Node objects with an index map, supporting decrease_key."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
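# --- Added spot check (an illustration, not part of the original file) ---
# After decrease_key, B(-17) is the minimum, so it sits at the root and
# repeated remove() calls yield values in ascending order.
assert my_min_heap.peek() is b
drained = []
while not my_min_heap.is_empty():
    drained.append(my_min_heap.remove().val)
assert drained == sorted(drained)  # -17, -1, 1, 3, 4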
| 292 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 4 |
"""simple docstring"""
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
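# --- Added spot check (an illustration, not part of the original script;
# note it resets the global `solution` list used by `solve`) ---
# The 4x4 puzzle is known to have exactly two solutions, and `solve`
# appends one entry per completed placement.
solution = []
small_board = [[0 for _ in range(4)] for _ in range(4)]
solve(small_board, 0)
assert len(solution) == 2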
| 292 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Round the SwiGLU hidden size up to the next multiple of `multiple_of`."""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
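# --- Added hand check (an illustration, not part of the original script) ---
# For LLaMA-7B: n = dim = 4096, ffn_dim_multiplier = 1, multiple_of = 256.
# int(1 * int(8 * 4096 / 3)) = 10922, rounded up to the next multiple of 256
# gives 11008, matching INTERMEDIATE_SIZE_MAP["7B"] above.
assert compute_intermediate_size(4096) == 11008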
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim))
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim)
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1)
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0)
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1)
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0)

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
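# --- Added usage note (paths are placeholders; the file name assumes the
# script's usual location in the transformers repo,
# src/transformers/models/llama/convert_llama_weights_to_hf.py) ---
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /output/path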
| 5 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 292 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 6 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_xsum(self):
        self._assert_generated_batch_equal_expected()
| 292 | 0 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
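# --- Added example (an illustration, not part of the original file) ---
# multiplication_table(5, 3) produces three lines joined by "\n":
assert multiplication_table(5, 3) == "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15"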
| 7 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape one Amazon.in results page for `product` into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div", attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100)
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out rows where the listed price exceeds the MRP
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Current Price of the product",
        ] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
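# --- Added hand check of the discount formula (values are made up) ---
# MRP "₹1,000" and price "₹800": (1000 - 800) / 1000 * 100 = 20% off.
mrp, price = "₹1,000", "₹800"
assert (
    (float(mrp.strip("₹").replace(",", "")) - float(price.strip("₹").replace(",", "")))
    / float(mrp.strip("₹").replace(",", ""))
) * 100 == 20.0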
| 292 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_swinv2 import (
        SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        Swinv2ForImageClassification,
        Swinv2ForMaskedImageModeling,
        Swinv2Model,
        Swinv2PreTrainedModel,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def lowerCamelCase ( self :Tuple ):
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ):
A = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )
A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = len(__UpperCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__UpperCamelCase )}." )
# get prompt text embeddings
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A = text_input_ids[:, : self.tokenizer.model_max_length]
A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A, A, A = text_embeddings.shape
A = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A = 42
if negative_prompt is None:
A = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
f" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A = negative_prompt
A = text_input_ids.shape[-1]
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A = uncond_embeddings.shape[1]
A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
        # However, this currently doesn't work on `mps`.
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A = {}
if accepts_eta:
A = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A, A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = 1 / 0.18_215 * latents
A = self.vae.decode(__UpperCamelCase ).sample
A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
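# Aside (not part of the pipeline class above): a standalone numeric sketch of the
# classifier-free guidance update applied in the denoising loop. The guidance_scale
# value and the two predictions are illustrative only.
uncond_pred, text_pred, guidance_scale = 0.1, 0.5, 7.5
guided_pred = uncond_pred + guidance_scale * (text_pred - uncond_pred)
assert abs(guided_pred - 3.1) < 1e-9  # prediction pushed past the text-conditioned branch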
| 292 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = job['''started_at''']
__SCREAMING_SNAKE_CASE : List[str] = job['''completed_at''']
__SCREAMING_SNAKE_CASE : List[str] = date_parser.parse(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = date_parser.parse(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = round((end_datetime - start_datetime).total_seconds() / 60.0 )
__SCREAMING_SNAKE_CASE : Any = start
__SCREAMING_SNAKE_CASE : Optional[int] = end
__SCREAMING_SNAKE_CASE : Dict = duration_in_min
return job_info
def _UpperCamelCase ( lowercase__ , lowercase__=None ):
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if token is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''}
__SCREAMING_SNAKE_CASE : int = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__SCREAMING_SNAKE_CASE : int = requests.get(lowercase__ , headers=lowercase__ ).json()
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(lowercase__ ) for job in result['''jobs''']} )
__SCREAMING_SNAKE_CASE : Optional[int] = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = requests.get(url + F'''&page={i + 2}''' , headers=lowercase__ ).json()
job_time.update({job['''name''']: extract_time_from_single_job(lowercase__ ) for job in result['''jobs''']} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
__lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
__lowerCAmelCase : Tuple =parser.parse_args()
__lowerCAmelCase : Any =get_job_time(args.workflow_run_id)
__lowerCAmelCase : int =dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
| 9 |
"""simple docstring"""
_snake_case : Optional[int] = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
lowerCamelCase__: Optional[int] =9
lowerCamelCase__: Union[str, Any] =[
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCamelCase__: Optional[Any] =kruskal(__a , __a )
lowerCamelCase__: str =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(__a ) == sorted(__a )
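# A minimal Kruskal sketch matching the (num_nodes, edge_list) call signature the
# test above assumes; plain union-find without rank or path compression, written
# here for illustration only.
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(node):
        while parent[node] != node:
            node = parent[node]
        return node

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # edge joins two components, so keep it
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst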
| 10 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A__ ( UpperCamelCase ):
A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def A__ ( UpperCamelCase ):
A = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
A = s_dict.pop(UpperCamelCase )
elif "subsample" in key:
A = s_dict.pop(UpperCamelCase )
def A__ ( UpperCamelCase ):
A, A = emb.weight.shape
A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A = emb.weight.data
return lin_layer
def A__ ( UpperCamelCase , UpperCamelCase ):
A = torch.load(UpperCamelCase , map_location="cpu" )
A = mam_aaa["args"]
A = mam_aaa["model"]
A = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(UpperCamelCase )
rename_keys(UpperCamelCase )
A = state_dict["decoder.embed_tokens.weight"].shape[0]
A = args.share_decoder_input_output_embed
A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )]
A = SpeechaTextConfig(
vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , )
A = SpeechaTextForConditionalGeneration(UpperCamelCase )
A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
A = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A = lm_head_weights
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case : str = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
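# Hedged, self-contained sketch of the embedding-to-linear weight tying the helper
# above performs when `share_decoder_input_output_embed` is set; it does not call
# the obfuscated functions in this file.
def tie_embedding_to_linear(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # the lm_head shares the embedding matrix
    return lin_layer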
| 292 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowerCAmelCase__ = random.Random()
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int]=1.0 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None ):
if rng is None:
_A : Dict = global_rng
_A : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=7 , __lowerCamelCase=4_0_0 , __lowerCamelCase=2_0_0_0 , __lowerCamelCase=2_4 , __lowerCamelCase=2_4 , __lowerCamelCase=0.0 , __lowerCamelCase=1_6_0_0_0 , __lowerCamelCase=True , __lowerCamelCase=True , ) -> Tuple:
_A : Tuple = parent
_A : Any = batch_size
_A : List[Any] = min_seq_length
_A : List[Any] = max_seq_length
_A : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Optional[Any] = feature_size
_A : List[Any] = num_mel_bins
_A : Optional[int] = padding_value
_A : List[Any] = sampling_rate
_A : List[Any] = return_attention_mask
_A : List[str] = do_normalize
def _lowerCamelCase ( self) -> List[Any]:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCamelCase ( self , __lowerCamelCase=False , __lowerCamelCase=False) -> Union[str, Any]:
def _flatten(__lowerCamelCase):
return list(itertools.chain(*__lowerCamelCase))
if equal_length:
_A : List[Any] = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
_A : List[Any] = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_A : str = [np.asarray(__lowerCamelCase) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase__ ( a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SpeechaTextFeatureExtractor if is_speech_available() else None
def _lowerCamelCase ( self) -> Any:
_A : Dict = SpeechaTextFeatureExtractionTester(self)
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
self.assertTrue(np.all(np.mean(__lowerCamelCase , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(__lowerCamelCase , axis=0) - 1) < 1e-3))
def _lowerCamelCase ( self) -> Dict:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_A : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_A : List[str] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
_A : Any = [np.asarray(__lowerCamelCase) for speech_input in speech_inputs]
# Test feature size
_A : List[Any] = feature_extractor(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="np").input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
# Test not batched input
_A : Optional[int] = feature_extractor(speech_inputs[0] , return_tensors="np").input_features
_A : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np").input_features
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
# Test batched
_A : Optional[int] = feature_extractor(__lowerCamelCase , return_tensors="np").input_features
_A : Optional[int] = feature_extractor(__lowerCamelCase , return_tensors="np").input_features
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
# Test 2-D numpy arrays are batched.
_A : int = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_A : Optional[Any] = np.asarray(__lowerCamelCase)
_A : Dict = feature_extractor(__lowerCamelCase , return_tensors="np").input_features
_A : Union[str, Any] = feature_extractor(__lowerCamelCase , return_tensors="np").input_features
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
def _lowerCamelCase ( self) -> Dict:
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_A : int = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
_A : int = ["longest", "max_length", "do_not_pad"]
_A : int = [None, 1_6, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase):
_A : Optional[Any] = feature_extractor(
__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_attention_mask=__lowerCamelCase)
_A : Union[str, Any] = inputs.input_features
_A : int = inputs.attention_mask
_A : List[str] = [np.sum(__lowerCamelCase) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def _lowerCamelCase ( self) -> Optional[int]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_A : int = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
_A : Any = ["longest", "max_length", "do_not_pad"]
_A : str = [None, 1_6, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase):
_A : Any = feature_extractor(
__lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase , return_tensors="np" , return_attention_mask=__lowerCamelCase)
_A : Dict = inputs.input_features
_A : str = inputs.attention_mask
_A : int = [np.sum(__lowerCamelCase) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
        self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def _lowerCamelCase ( self) -> Dict:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_A : Optional[int] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
_A : Tuple = feature_extractor(
__lowerCamelCase , padding="max_length" , max_length=4 , truncation=__lowerCamelCase , return_tensors="np" , return_attention_mask=__lowerCamelCase , )
_A : Tuple = inputs.input_features
_A : Optional[int] = inputs.attention_mask
_A : Optional[Any] = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1])
self._check_zero_mean_unit_variance(input_features[2])
def _lowerCamelCase ( self) -> Dict:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_A : Union[str, Any] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
_A : Optional[int] = feature_extractor(
__lowerCamelCase , padding="longest" , max_length=4 , truncation=__lowerCamelCase , return_tensors="np" , return_attention_mask=__lowerCamelCase , )
_A : List[Any] = inputs.input_features
_A : int = inputs.attention_mask
_A : Tuple = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 2_4))
_A : List[str] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
_A : List[Any] = feature_extractor(
__lowerCamelCase , padding="longest" , max_length=1_6 , truncation=__lowerCamelCase , return_tensors="np" , return_attention_mask=__lowerCamelCase , )
_A : Optional[int] = inputs.input_features
_A : Tuple = inputs.attention_mask
_A : List[str] = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 2_4))
def _lowerCamelCase ( self) -> str:
import torch
_A : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_A : str = np.random.rand(1_0_0 , 3_2).astype(np.floataa)
_A : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Dict = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np")
self.assertTrue(np_processed.input_features.dtype == np.floataa)
_A : Dict = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt")
self.assertTrue(pt_processed.input_features.dtype == torch.floataa)
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
from datasets import load_dataset
_A : Union[str, Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
# automatic decoding with librispeech
_A : Dict = ds.sort("id").select(range(__lowerCamelCase))[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self) -> Any:
# fmt: off
_A : Dict = np.array([
-1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1,
-1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8,
-1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
])
# fmt: on
_A : Union[str, Any] = self._load_datasamples(1)
_A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_A : Tuple = feature_extractor(__lowerCamelCase , return_tensors="pt").input_features
        self.assertEqual(input_features.shape , (1, 5_8_4, 2_4))
self.assertTrue(np.allclose(input_features[0, 0, :3_0] , __lowerCamelCase , atol=1e-4))
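# Self-contained sketch of the per-utterance normalization the assertions above
# verify: after mean/variance normalization each feature dimension has near-zero
# mean and near-unit variance over the time axis. The epsilon is an assumption
# added for numerical safety, not taken from the library.
def normalize_utterance(features):
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / (std + 1e-7)

_example = normalize_utterance(np.random.rand(100, 24))
assert np.all(np.abs(_example.mean(axis=0)) < 1e-3)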
| 11 |
"""simple docstring"""
from math import isqrt, loga
def A__ ( UpperCamelCase ):
A = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase , UpperCamelCase ):
A = False
return [i for i in range(2 , UpperCamelCase ) if is_prime[i]]
def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ):
A = degree * loga(UpperCamelCase )
A = int(UpperCamelCase )
A = calculate_prime_numbers(UpperCamelCase )
A = 0
A = 0
A = len(UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 292 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
    __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resumption.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
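# Hedged sketch of the checkpoint-name parsing used when resuming above: the epoch
# index is recovered from a folder name such as "epoch_3", and training resumes at
# the following epoch. The path below is illustrative.
def parse_resume_epoch(checkpoint_path):
    suffix = checkpoint_path.split("epoch_")[1]
    digits = ""
    for char in suffix:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits) + 1

assert parse_resume_epoch("outputs/epoch_3") == 4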
| 12 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_snake_case : Union[str, Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
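# Aside (not part of the module above): a minimal sketch of the lazy-import pattern
# _LazyModule implements, deferring a submodule import until an attribute is first
# accessed. Names here are illustrative, not the library's internals.
import importlib

class LazyNamespace:
    def __init__(self, import_structure):
        self._import_structure = import_structure  # {submodule: [exported names]}

    def __getattr__(self, name):
        for submodule, names in self._import_structure.items():
            if name in names:
                return getattr(importlib.import_module(submodule), name)
        raise AttributeError(name)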
| 292 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=True ):
model.train()
SCREAMING_SNAKE_CASE_: Any = model(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = F.mse_loss(_UpperCAmelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_UpperCAmelCase )
def A_ ( _UpperCAmelCase , _UpperCAmelCase=False ):
set_seed(42 )
SCREAMING_SNAKE_CASE_: Tuple = RegressionModel()
SCREAMING_SNAKE_CASE_: Optional[int] = deepcopy(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE_: Union[str, Any] = DataLoader(_UpperCAmelCase , batch_size=16 )
model.to(accelerator.device )
if sched:
SCREAMING_SNAKE_CASE_: List[Any] = AdamW(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_: str = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        SCREAMING_SNAKE_CASE_: Union[str, Any] = LambdaLR(_UpperCAmelCase , lr_lambda=lambda epoch : epoch**0.6_5 )
        SCREAMING_SNAKE_CASE_: List[Any] = LambdaLR(_UpperCAmelCase , lr_lambda=lambda epoch : epoch**0.6_5 )
# Make a copy of `model`
if sched:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def A_ ( _UpperCAmelCase ):
# Test when on a single CPU or GPU that the context manager does nothing
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = get_training_setup(_UpperCAmelCase )
# Use a single batch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = next(iter(_UpperCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_UpperCAmelCase ):
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
# Sync grads
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE_: Optional[Any] = ddp_input[torch.randperm(len(_UpperCAmelCase ) )]
def A_ ( _UpperCAmelCase ):
# Test on distributed setup that context manager behaves properly
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = get_training_setup(_UpperCAmelCase )
# Use a single batch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = next(iter(_UpperCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_UpperCAmelCase ):
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
# Sync grads
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE_: Any = ddp_input[torch.randperm(len(_UpperCAmelCase ) )]
def A_ ( _UpperCAmelCase=False , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_: List[str] = Accelerator(
split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = get_training_setup(_UpperCAmelCase )
for iteration, batch in enumerate(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_UpperCAmelCase ):
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_UpperCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE_: Dict = ddp_input[torch.randperm(len(_UpperCAmelCase ) )]
GradientState._reset_state()
def A_ ( _UpperCAmelCase=False , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_: List[Any] = Accelerator(
split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = get_training_setup(_UpperCAmelCase , _UpperCAmelCase )
for iteration, batch in enumerate(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_UpperCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_UpperCAmelCase ):
step_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
SCREAMING_SNAKE_CASE_: Optional[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_UpperCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def A_ ( ):
SCREAMING_SNAKE_CASE_: List[Any] = Accelerator()
SCREAMING_SNAKE_CASE_: Dict = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE_: Optional[Any] = DataLoader(_UpperCAmelCase , batch_size=16 )
SCREAMING_SNAKE_CASE_: Any = RegressionDataset(length=96 )
SCREAMING_SNAKE_CASE_: Optional[Any] = DataLoader(_UpperCAmelCase , batch_size=16 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_UpperCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_UpperCAmelCase )
if iteration < len(_UpperCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_UpperCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_UpperCAmelCase )
if batch_num < len(_UpperCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def A_ ( ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = Accelerator()
SCREAMING_SNAKE_CASE_: int = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_UpperCAmelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_UpperCAmelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(_UpperCAmelCase , _UpperCAmelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(_UpperCAmelCase , _UpperCAmelCase )
def A_ ( _UpperCAmelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
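# Hedged sketch of the manual accumulation pattern the tests above exercise: skip
# gradient synchronization on all but the last micro-batch, then take one optimizer
# step for the whole group. Function and variable names are illustrative.
import contextlib

def accumulate_micro_batches(accelerator, model, optimizer, micro_batches):
    for step, (inputs, targets) in enumerate(micro_batches):
        last = step == len(micro_batches) - 1
        context = contextlib.nullcontext() if last else accelerator.no_sync(model)
        with context:
            loss = F.mse_loss(model(inputs), targets.to(inputs.device))
            accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()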
| 13 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : int = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''marian'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = vocab_size
A = decoder_vocab_size or vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class _UpperCAmelCase ( lowercase_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A = {0: "batch", 1: "decoder_sequence"}
A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
else:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(__UpperCamelCase , self ).outputs
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
A = common_inputs["decoder_input_ids"].shape[1]
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A, A = self.num_layers
A = min(__UpperCamelCase , __UpperCamelCase )
A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A = seqlen + 2
A, A = self.num_layers
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs["attention_mask"].dtype
A = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
A = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
A = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def lowerCamelCase ( self :List[str] ):
return 1e-4
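# Illustrative shape arithmetic for the past_key_values tensors assembled above;
# the dimensions are hypothetical, not taken from a real Marian checkpoint.
_batch, _heads, _d_model, _past_len = 2, 8, 512, 10
_kv_shape = (_batch, _heads, _past_len, _d_model // _heads)
assert _kv_shape == (2, 8, 10, 64)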
| 292 | 0 |
from __future__ import annotations
from fractions import Fraction
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
"""simple docstring"""
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[str]:
"""simple docstring"""
A__ = []
A__ = 11
A__ = int('''1''' + '''0''' * digit_len )
for num in range(lowercase_ , lowercase_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowercase_ , lowercase_ ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
A__ = 10
return solutions
def SCREAMING_SNAKE_CASE ( lowercase_ = 2 ) -> int:
"""simple docstring"""
A__ = 1.0
for fraction in fraction_list(lowercase_ ):
A__ = Fraction(lowercase_ )
result *= frac.denominator / frac.numerator
return int(lowercase_ )
if __name__ == "__main__":
print(solution())
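# Worked instance of the digit-cancelling property the solver above searches for:
# 49/98 equals 4/8 after (incorrectly) cancelling the shared digit 9.
assert Fraction(49, 98) == Fraction(4, 8) == Fraction(1, 2)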
| 14 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def A__ ( UpperCamelCase ):
A = [False] * len(UpperCamelCase )
A = [-1] * len(UpperCamelCase )
def dfs(UpperCamelCase , UpperCamelCase ):
A = True
A = c
for u in graph[v]:
if not visited[u]:
dfs(UpperCamelCase , 1 - c )
for i in range(len(UpperCamelCase ) ):
if not visited[i]:
dfs(UpperCamelCase , 0 )
for i in range(len(UpperCamelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
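# Counter-example sketch: an odd cycle (a triangle) cannot be split into two
# independent sets, so any 2-colouring check must reject it. This is a
# self-contained iterative variant, not a call into the recursive checker above.
def is_bipartite(adjacency):
    color = {}
    for start in adjacency:
        if start in color:
            continue
        color[start] = 0
        stack = [start]
        while stack:
            node = stack.pop()
            for neighbor in adjacency[node]:
                if neighbor not in color:
                    color[neighbor] = 1 - color[node]
                    stack.append(neighbor)
                elif color[neighbor] == color[node]:
                    return False
    return True

assert is_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []})
assert not is_bipartite({0: [1, 2], 1: [0, 2], 2: [0, 1]})  # triangle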
| 292 | 0 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place with insertion sort."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift array[index] down so the subtree rooted there is a max heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(
    array: list, first_index: int, middle_index: int, last_index: int
) -> int:
    """Return the median of three candidate pivot values."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition around `pivot`; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(
    array: list, start: int, end: int, size_threshold: int, max_depth: int
) -> list:
    """Quicksort while the recursion budget lasts, falling back to heap sort;
    slices at or below `size_threshold` are finished with insertion sort."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
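# A quick non-interactive sanity check of the hybrid sort:
# data = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
# assert sort(data) == sorted(data)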
| 15 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ):
A = 1.0 if scale is None else scale
A = 0.0 if loc is None else loc
super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] )
@property
def lowerCamelCase ( self :Any ):
return self.base_dist.mean * self.scale + self.loc
@property
def lowerCamelCase ( self :Optional[int] ):
return self.base_dist.variance * self.scale**2
@property
def lowerCamelCase ( self :Dict ):
return self.variance.sqrt()
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ):
super().__init__(**__UpperCamelCase )
A = args_dim
A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] )
A = domain_map
def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ):
A = [proj(__UpperCamelCase ) for proj in self.proj]
return self.domain_map(*__UpperCamelCase )
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int ):
super().__init__()
A = function
def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ):
return self.function(__UpperCamelCase , *__UpperCamelCase )
class _UpperCAmelCase :
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :Any , __UpperCamelCase :int = 1 ):
A = dim
A = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ):
if self.dim == 1:
return self.distribution_class(*__UpperCamelCase )
else:
return Independent(self.distribution_class(*__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ):
A = self._base_distribution(__UpperCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim )
@property
def lowerCamelCase ( self :List[Any] ):
return () if self.dim == 1 else (self.dim,)
@property
def lowerCamelCase ( self :Tuple ):
return len(self.event_shape )
@property
def lowerCamelCase ( self :int ):
return 0.0
def lowerCamelCase ( self :str , __UpperCamelCase :int ):
return ParameterProjection(
in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ):
raise NotImplementedError()
@staticmethod
def lowerCamelCase ( __UpperCamelCase :torch.Tensor ):
return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
UpperCamelCase = StudentT
@classmethod
def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
A = 2.0 + cls.squareplus(__UpperCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"loc": 1, "scale": 1}
UpperCamelCase = Normal
@classmethod
def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"total_count": 1, "logits": 1}
UpperCamelCase = NegativeBinomial
@classmethod
def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ):
A, A = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase )
else:
return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ):
A, A = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
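# The heads above all follow the same flow: project hidden states to the raw
# distribution parameters, constrain them in `domain_map`, then build the
# distribution. A minimal sketch of that flow, written directly against
# torch.distributions with a toy hidden size of 8 (values assumed):
#
#     hidden = torch.randn(4, 8)
#     proj = nn.Linear(8, 2)
#     loc, raw_scale = proj(hidden).chunk(2, dim=-1)
#     scale = (raw_scale + torch.sqrt(torch.square(raw_scale) + 4.0)) / 2.0  # squareplus
#     distr = Normal(loc.squeeze(-1), scale.squeeze(-1))
#     loss = -distr.log_prob(torch.zeros(4)).mean()  # negative log-likelihood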
| 292 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_lock_filename(tmpdir):
    # lock file names longer than the OS limit (255 chars) must be truncated
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
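# Minimal direct usage, assuming the same FileLock API the tests exercise
# (`acquire()` as a context manager that raises Timeout on contention):
#
#     lock = FileLock("resource.lock")
#     with lock.acquire(timeout=1):
#         ...  # critical section; the lock file is released on exit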
| 16 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
UpperCamelCase = None
def lowerCamelCase ( self :List[Any] ):
A = self.feature_extraction_class(**self.feat_extract_dict )
A = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
A = self.feature_extraction_class.from_json_file(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
A = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Tuple ):
A = self.feature_extraction_class()
self.assertIsNotNone(__UpperCamelCase )
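# A concrete test class is expected to fill in the two attributes the mixin
# reads; a sketch with assumed names:
#
#     class Wav2Vec2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#         feature_extraction_class = Wav2Vec2FeatureExtractor
#         feat_extract_dict = {"feature_size": 1, "sampling_rate": 16_000}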
| 292 | 0 |
"""simple docstring"""
def max_product(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
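# The swap is what lets a negative factor turn the running minimum into the
# new maximum, e.g. max_product([2, 3, -2, 4]) == 6 and
# max_product([-2, 0, -1]) == 0.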
| 17 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = RoFormerTokenizer
UpperCamelCase = RoFormerTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowerCamelCase ( self :List[str] ):
super().setUp()
def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ):
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ):
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Any ):
A = "永和服装饰品有限公司,今天天气非常好"
A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def lowerCamelCase ( self :int ):
A = self.get_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :str ):
A = self.get_rust_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :Any ):
pass
def lowerCamelCase ( self :Tuple ):
pass
def lowerCamelCase ( self :List[str] ):
pass
| 292 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class a__ ( A__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
A = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
A = Features({'question': Value('string' ), 'context': Value('string' )} )
A = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
A = "question"
A = "context"
A = "answers"
@property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
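# A template like this is typically applied by casting a dataset to the task;
# a sketch assuming the library's `prepare_for_task` API:
#
#     from datasets import load_dataset
#     squad = load_dataset("squad", split="train")
#     squad = squad.prepare_for_task("question-answering-extractive")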
| 18 |
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase, or PascalCase if use_pascal."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
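# Examples with the names fixed above:
# snake_to_camel_case("some_random_string")                   # 'someRandomString'
# snake_to_camel_case("some_random_string", use_pascal=True)  # 'SomeRandomString'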
| 292 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
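# A download-free smoke test of `xgboost()` on synthetic data (shapes assumed):
#
#     rng = np.random.default_rng(0)
#     x = rng.normal(size=(100, 8))
#     y = x @ rng.normal(size=8)
#     print(xgboost(x[:80], y[:80], x[80:]).shape)  # (20, 1)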
| 19 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ):
A = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ):
super().__init__()
self.register_modules(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ):
if latents is None:
A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
A = latents.to(__UpperCamelCase )
A = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
A = torch.device(f"cuda:{gpu_id}" )
A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
A = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A = None
for cpu_offloaded_model in [self.unet, self.movq]:
A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase ( self :str ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ):
A = self._execution_device
A = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
A = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = hint.repeat_interleave(__UpperCamelCase , dim=0 )
A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
A = self.scheduler.timesteps
A = self.movq.config.latent_channels
A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
# create initial latent
A = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = {"image_embeds": image_embeds, "hint": hint}
A = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
A, A = noise_pred.split(latents.shape[1] , dim=1 )
A, A = noise_pred.chunk(2 )
A, A = variance_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A, A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
A = image * 0.5 + 0.5
A = image.clamp(0 , 1 )
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
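# Worked example for `downscale_height_and_width`: with the default 512x512
# request and a movq scale factor of 8, 512 // 8**2 == 8 with no remainder, so
# latents are prepared at 8 * 8 == 64 per side and the decoder upsamples back
# to 512.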
| 292 | 0 |
def simplify(current_set: list[list]) -> list[list]:
    """Normalize each row by its leading coefficient, cancel the first column
    by subtraction, then recurse on the remaining sub-system."""
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations, each given as n coefficients
    followed by the constant term."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Move one equation with no zero coefficients to the front, if needed
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the most reduced row upwards
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
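# For the 5-variable system above every equation has the form sum(x) + x_i = rhs
# with sum(x) = 5, so the solution is [-1.0, 0.0, 1.0, 2.0, 3.0]; the single
# equation 4x = 2 yields [0.5].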
| 20 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A = (image_size // patch_size) ** 2
A = num_patches + 1
def lowerCamelCase ( self :Any ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self :Union[str, Any] ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ):
A = ViTMSNModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ):
A = self.type_sequence_label_size
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , labels=__UpperCamelCase )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase ( self :Optional[Any] ):
A = self.prepare_config_and_inputs()
A, A, A = config_and_inputs
A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :Optional[int] ):
A = ViTMSNModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def lowerCamelCase ( self :Union[str, Any] ):
pass
def lowerCamelCase ( self :int ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCamelCase ( self :Tuple ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase ( self :List[Any] ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ViTMSNModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A__ ( ):
A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self :Union[str, Any] ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self :Any ):
torch.manual_seed(2 )
A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
# verify the logits
A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 292 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count is closest to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"{solution() = }")
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''vivit'''
def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = num_frames
A = tubelet_size
A = num_channels
A = qkv_bias
super().__init__(**__UpperCamelCase )
| 292 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE :str = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :Optional[Any] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :int = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :List[Any] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :int = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
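# A simplified sketch of the lazy-import pattern `_LazyModule` implements
# (an illustration, not the actual transformers internals): a PEP 562
# module-level __getattr__ performs the real import on first access.
#
#     import importlib
#
#     def __getattr__(name):
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module("." + submodule, __name__), name)
#         raise AttributeError(name)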
| 22 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
A = np.random.RandomState(__UpperCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :Any ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Union[str, Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self :Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self :Optional[int] ):
A = ort.SessionOptions()
A = False
return options
def lowerCamelCase ( self :Dict ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase ( self :Any ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
A = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
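# In img2img pipelines, `strength` decides how much of the scheduler
# trajectory actually runs: roughly int(num_inference_steps * strength)
# denoising steps start from the noised init image, so strength=0.75 with 10
# steps behaves like ~7 steps of denoising (behavior assumed from the
# diffusers img2img design).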
| 292 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__: int = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class SCREAMING_SNAKE_CASE( A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = XLMProphetNetTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def A ( self : Dict ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Dict = XLMProphetNetTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Any ) -> Tuple:
UpperCAmelCase : Any = '''[PAD]'''
UpperCAmelCase : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__snake_case ) , 1012 )
def A ( self : int ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def A ( self : Tuple ) -> int:
UpperCAmelCase : Optional[Any] = XLMProphetNetTokenizer(__snake_case , keep_accents=__snake_case )
UpperCAmelCase : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def A ( self : str ) -> List[Any]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def A ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : int = '''Hello World!'''
UpperCAmelCase : str = [35389, 6672, 49, 2]
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def A ( self : Dict ) -> Union[str, Any]:
# fmt: off
UpperCAmelCase : int = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 23 |
"""simple docstring"""
def A__ ( UpperCamelCase ):
A = generate_pascal_triangle(UpperCamelCase )
for row_idx in range(UpperCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = []
for current_row_idx in range(UpperCamelCase ):
A = populate_current_row(UpperCamelCase , UpperCamelCase )
triangle.append(UpperCamelCase )
return triangle
def A__ ( UpperCamelCase , UpperCamelCase ):
A = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A, A = 1, 1
for current_col_idx in range(1 , UpperCamelCase ):
calculate_current_element(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return current_row
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
A = triangle[current_row_idx - 1][current_col_idx - 1]
A = triangle[current_row_idx - 1][current_col_idx]
A = above_to_left_elt + above_to_right_elt
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = [[1]]
for row_index in range(1 , UpperCamelCase ):
A = [0] + result[-1] + [0]
A = row_index + 1
# Calculate the number of distinct elements in a row
A = sum(divmod(UpperCamelCase , 2 ) )
A = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A = row_first_half + row_second_half
result.append(UpperCamelCase )
return result
def A__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None:
A = F"{func.__name__}({value})"
A = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCamelCase , UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 292 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE__ :
def __init__(self : List[str] , a__ : str = "cpu" , a__ : str = "openai/clip-vit-large-patch14" ):
"""simple docstring"""
__snake_case = device
__snake_case = CLIPTokenizerFast.from_pretrained(a__ )
__snake_case = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
__snake_case = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
__snake_case = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__snake_case = torchvision.transforms.Resize(224 )
__snake_case = torchvision.transforms.CenterCrop(224 )
def a (self : Optional[Any] , a__ : str ):
"""simple docstring"""
__snake_case = self.resize(a__ )
__snake_case = self.center_crop(a__ )
__snake_case = self.normalize(a__ )
return images
def __call__(self : Union[str, Any] , a__ : Tuple=None , a__ : Tuple=None , **a__ : int ):
"""simple docstring"""
__snake_case = self.tokenizer(text=a__ , **a__ )
__snake_case = self.preprocess_img(a__ )
__snake_case = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Optional[int] , a__ : Dict=10 , a__ : Dict=0.0_1 , a__ : Union[str, Any]=None , a__ : Any=None , a__ : Optional[Any]=None , a__ : Dict=None , a__ : Union[str, Any]=None , a__ : List[str]=None , a__ : Union[str, Any]=False , a__ : Dict=True , a__ : Dict="image" , a__ : Optional[Any]=True , a__ : List[str]=False , a__ : Optional[Any]=False , a__ : List[str]=False , ):
"""simple docstring"""
super().__init__()
__snake_case = None
__snake_case = device if device else get_device()
if vqgan:
__snake_case = vqgan
else:
__snake_case = load_vqgan(self.device , conf_path=a__ , ckpt_path=a__ )
self.vqgan.eval()
if clip:
__snake_case = clip
else:
__snake_case = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
__snake_case = ProcessorGradientFlow(device=self.device )
__snake_case = iterations
__snake_case = lr
__snake_case = log
__snake_case = make_grid
__snake_case = return_val
__snake_case = quantize
__snake_case = self.vqgan.decoder.z_shape
def a (self : str , a__ : Optional[int]=None , a__ : Tuple=None , a__ : str=5 , a__ : int=True ):
"""simple docstring"""
__snake_case = []
if output_path is None:
__snake_case = '''./animation.gif'''
if input_path is None:
__snake_case = self.save_path
__snake_case = sorted(glob(input_path + '''/*''' ) )
if not len(a__ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(a__ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
__snake_case = total_duration / len(a__ )
__snake_case = [frame_duration] * len(a__ )
if extend_frames:
__snake_case = 1.5
__snake_case = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(a__ ) )
imageio.mimsave(a__ , a__ , duration=a__ )
print(f"""gif saved to {output_path}""" )
def a (self : List[Any] , a__ : List[str]=None , a__ : Optional[int]=None ):
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
__snake_case = preprocess(Image.open(a__ ) , target_image_size=256 ).to(self.device )
__snake_case = preprocess_vqgan(a__ )
__snake_case , *__snake_case = self.vqgan.encode(a__ )
return z
def a (self : List[str] , a__ : int ):
"""simple docstring"""
__snake_case = self.latent.detach().requires_grad_()
__snake_case = base_latent + transform_vector
if self.quantize:
__snake_case , *__snake_case = self.vqgan.quantize(a__ )
else:
__snake_case = trans_latent
return self.vqgan.decode(a__ )
def a (self : int , a__ : Dict , a__ : str , a__ : str=None ):
"""simple docstring"""
__snake_case = self.clip_preprocessor(text=a__ , images=a__ , return_tensors='''pt''' , padding=a__ )
__snake_case = self.clip(**a__ )
__snake_case = clip_outputs.logits_per_image
if weights is not None:
__snake_case = similarity_logits * weights
return similarity_logits.sum()
def a (self : Dict , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : int ):
"""simple docstring"""
__snake_case = self._get_clip_similarity(pos_prompts['''prompts'''] , a__ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
__snake_case = self._get_clip_similarity(neg_prompts['''prompts'''] , a__ , weights=neg_prompts['''weights'''] )
else:
__snake_case = torch.tensor([1] , device=self.device )
__snake_case = -torch.log(a__ ) + torch.log(a__ )
return loss
def a (self : Any , a__ : Optional[Any] , a__ : List[str] , a__ : Tuple ):
"""simple docstring"""
__snake_case = torch.randn_like(self.latent , requires_grad=a__ , device=self.device )
__snake_case = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__snake_case = self._add_vector(a__ )
__snake_case = loop_post_process(a__ )
__snake_case = self._get_CLIP_loss(a__ , a__ , a__ )
print('''CLIP loss''' , a__ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=a__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def a (self : int , a__ : Optional[Any] , a__ : Optional[int] , a__ : List[Any] ):
"""simple docstring"""
wandb.init(reinit=a__ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
__snake_case = Image.open(a__ )
__snake_case = image.resize((256, 256) )
            wandb.log({'''Original Image''': wandb.Image(a__ )} )
def a (self : Optional[int] , a__ : int ):
"""simple docstring"""
if not prompts:
return []
__snake_case = []
__snake_case = []
if isinstance(a__ , a__ ):
__snake_case = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(a__ , (tuple, list) ):
__snake_case = prompt[0]
__snake_case = float(prompt[1] )
elif ":" in prompt:
__snake_case , __snake_case = prompt.split(''':''' )
__snake_case = float(a__ )
else:
__snake_case = prompt
__snake_case = 1.0
processed_prompts.append(a__ )
weights.append(a__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a__ , device=self.device ),
}
def a (self : Optional[Any] , a__ : Union[str, Any] , a__ : Any=None , a__ : List[Any]=None , a__ : Optional[int]=True , a__ : Any=False , a__ : Any=True , a__ : int=True , a__ : List[str]=None , ):
"""simple docstring"""
if image_path:
__snake_case = self._get_latent(a__ )
else:
__snake_case = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(a__ , a__ , a__ )
assert pos_prompts, "You must provide at least one positive prompt."
__snake_case = self.process_prompts(a__ )
__snake_case = self.process_prompts(a__ )
if save_final and save_path is None:
__snake_case = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(a__ ):
os.makedirs(a__ )
else:
__snake_case = save_path + '''_''' + get_timestamp()
os.makedirs(a__ )
__snake_case = save_path
__snake_case = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(a__ ) )
__snake_case = loop_post_process(a__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a__ , a__ , a__ ) ):
if show_intermediate:
show_pil(a__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(a__ )} )
if show_final:
show_pil(a__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
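# For orientation: this class appears to mirror VQGAN_CLIP from the
# transformers research project `research_projects/vqgan-clip` (here the
# class and its methods carry rewritten names such as SCREAMING_SNAKE_CASE__
# and `a`). A hypothetical upstream-style call, with a made-up image path:
#
#   editor = VQGAN_CLIP(iterations=25, lr=0.03)
#   editor.generate("a picture of a smiling face", image_path="face.jpg",
#                   show_final=True)
#
# Each optimisation step adds a learned offset to the latent, decodes it
# through the VQGAN, and scores the decoded image against the positive and
# negative prompts with CLIP.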
| 24 |
"""simple docstring"""
import math
import sys
def A__ ( UpperCamelCase ):
A = ""
try:
with open(UpperCamelCase , "rb" ) as binary_file:
A = binary_file.read()
for dat in data:
A = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = {"0": "0", "1": "1"}
A, A = "", ""
A = len(UpperCamelCase )
for i in range(len(UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A = lexicon[curr_string]
result += last_match_id
A = last_match_id + "0"
        if math.log2(index ).is_integer():
A = {}
for curr_key in list(UpperCamelCase ):
A = lexicon.pop(UpperCamelCase )
A = new_lex
A = last_match_id + "1"
index += 1
A = ""
return result
def A__ ( UpperCamelCase , UpperCamelCase ):
A = 8
try:
with open(UpperCamelCase , "wb" ) as opened_file:
A = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCamelCase ) , UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
A = data_bits[counter:]
A = data_bits[counter + 1 :]
return data_bits
def A__ ( UpperCamelCase , UpperCamelCase ):
A = read_file_binary(UpperCamelCase )
A = remove_prefix(UpperCamelCase )
A = decompress_data(UpperCamelCase )
write_file_binary(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
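# Minimal sketch of the byte-to-bit-string conversion performed in
# read_file_binary above; each byte becomes its zero-padded 8-bit form:
#   >>> "".join(f"{byte:08b}" for byte in b"\x05\x01")
#   '0000010100000001'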
| 292 | 0 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase__ : str = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
UpperCAmelCase__ : str = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
UpperCAmelCase__ : Optional[int] = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=False ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = compute_bleu(
reference_corpus=SCREAMING_SNAKE_CASE__ , translation_corpus=SCREAMING_SNAKE_CASE__ , max_order=SCREAMING_SNAKE_CASE__ , smooth=SCREAMING_SNAKE_CASE__ )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : Union[str, Any] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
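# For reference, the brevity penalty returned above follows the standard
# BLEU definition: BP = 1 if c > r else exp(1 - r / c), where c is the total
# length of the translations and r the effective reference length.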
| 25 |
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ):
A = name
A = val
def __str__( self :str ):
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ):
return self.val < other.val
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ):
A = {}
A = {}
A = self.build_heap(__UpperCamelCase )
def __getitem__( self :int , __UpperCamelCase :Optional[int] ):
return self.get_value(__UpperCamelCase )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ):
return (idx - 1) // 2
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
return idx * 2 + 1
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ):
return idx * 2 + 2
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ):
return self.heap_dict[key]
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
A = len(__UpperCamelCase ) - 1
A = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
A = idx
A = i.val
for i in range(__UpperCamelCase , -1 , -1 ):
self.sift_down(__UpperCamelCase , __UpperCamelCase )
return array
def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ):
while True:
A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
A = self.get_right_child_idx(__UpperCamelCase )
A = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
A = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
A = r
if smallest != idx:
A, A = array[smallest], array[idx]
                A, A = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
A = smallest
else:
break
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ):
A = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
A, A = self.heap[idx], self.heap[p]
A, A = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
A = p
A = self.get_parent_idx(__UpperCamelCase )
def lowerCamelCase ( self :Any ):
return self.heap[0]
def lowerCamelCase ( self :Tuple ):
A, A = self.heap[-1], self.heap[0]
A, A = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
A = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ):
self.heap.append(__UpperCamelCase )
A = len(self.heap ) - 1
A = node.val
self.sift_up(len(self.heap ) - 1 )
def lowerCamelCase ( self :Tuple ):
return len(self.heap ) == 0
def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
A = new_value
A = new_value
self.sift_up(self.idx_of_element[node] )
_snake_case : Optional[int] = Node('R', -1)
_snake_case : Tuple = Node('B', 6)
_snake_case : Tuple = Node('A', 3)
_snake_case : Optional[int] = Node('X', 1)
_snake_case : List[Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_snake_case : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
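# Complexity of the structure above: build_heap is O(n) overall, while
# insert, remove (pop-min) and decrease_key each take O(log n) sift steps.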
| 292 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device("cpu")
def lowerCAmelCase_ ( ):
_A : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_A : Optional[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return im
def lowerCAmelCase_ ( snake_case_ ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = dct.pop(snake_case_ )
_A : Optional[Any] = val
def lowerCAmelCase_ ( snake_case_ ):
_A : int = []
for k in state_dict.keys():
_A : Optional[Any] = k
if ".pwconv" in k:
_A : int = k_new.replace(""".pwconv""",""".point_wise_conv""" )
if ".dwconv" in k:
_A : Dict = k_new.replace(""".dwconv""",""".depth_wise_conv""" )
if ".Proj." in k:
_A : str = k_new.replace(""".Proj.""",""".proj.""" )
if "patch_embed" in k_new:
_A : Optional[Any] = k_new.replace("""patch_embed""","""swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
_A : Any = k_new.split(""".""" )
if ls[2].isdigit():
_A : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
_A : Optional[Any] = k_new.replace("""network""","""swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
_A : str = 1000
_A : List[str] = """huggingface/label-files"""
_A : Union[str, Any] = """imagenet-1k-id2label.json"""
_A : Tuple = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
_A : int = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : Any = idalabel
_A : List[str] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_A : List[Any] = [3, 3, 6, 4]
_A : List[Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
_A : Dict = [3, 3, 9, 6]
_A : Union[str, Any] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
_A : Optional[Any] = [4, 3, 10, 5]
_A : Optional[Any] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
_A : Tuple = [4, 4, 12, 6]
_A : List[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
_A : Optional[Any] = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""",check_hash=snake_case_ )
else:
_A : List[Any] = torch.load(snake_case_,map_location="""cpu""" )
_A : Dict = checkpoint
_A : Dict = create_rename_keys(snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_,snake_case_,snake_case_ )
# load HuggingFace model
_A : str = SwiftFormerForImageClassification(snake_case_ ).eval()
hf_model.load_state_dict(snake_case_ )
# prepare test inputs
_A : Any = prepare_img()
_A : Optional[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
_A : Any = processor(images=snake_case_,return_tensors="""pt""" )
# compare outputs from both models
_A : Union[str, Any] = get_expected_output(snake_case_ )
_A : List[Any] = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5],snake_case_,atol=1e-3 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
_snake_case = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
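# Hypothetical invocation of this conversion script (the checkpoint path and
# the script filename are illustrative; the flags match the argparse
# definitions above):
#
#   python convert_swiftformer.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs.pth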
| 26 |
"""simple docstring"""
from __future__ import annotations
_snake_case : str = []
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
for i in range(len(UpperCamelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ):
if board[i][j] == 1:
return False
return True
def A__ ( UpperCamelCase , UpperCamelCase ):
if row >= len(UpperCamelCase ):
solution.append(UpperCamelCase )
printboard(UpperCamelCase )
print()
return True
for i in range(len(UpperCamelCase ) ):
if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = 1
solve(UpperCamelCase , row + 1 )
A = 0
return False
def A__ ( UpperCamelCase ):
for i in range(len(UpperCamelCase ) ):
for j in range(len(UpperCamelCase ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
_snake_case : List[str] = 8
_snake_case : List[str] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
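# Classic sanity check: for n = 8 the backtracking search should report
# exactly 92 distinct solutions, i.e. len(solution) == 92 after solve().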
| 292 | 0 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
__a : Dict = fname.split(os.path.sep )[-1]
return re.search(r'^(.*)_\d+\.jpg$' , _SCREAMING_SNAKE_CASE ).groups()[0]
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=None , __a=None ):
'''simple docstring'''
__a : Any = file_names
__a : List[str] = image_transform
__a : List[str] = label_to_id
def __len__( self ):
'''simple docstring'''
return len(self.file_names )
def __getitem__( self , __a ):
'''simple docstring'''
__a : Dict = self.file_names[idx]
__a : Tuple = PIL.Image.open(__a )
__a : int = raw_image.convert('RGB' )
if self.image_transform is not None:
__a : List[Any] = self.image_transform(__a )
__a : List[str] = extract_label(__a )
if self.label_to_id is not None:
__a : List[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] ):
# Initialize accelerator
if args.with_tracking:
__a : Optional[int] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
__a : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a : Optional[int] = config['lr']
__a : Optional[Any] = int(config['num_epochs'] )
__a : Tuple = int(config['seed'] )
__a : List[str] = int(config['batch_size'] )
__a : Union[str, Any] = config['image_size']
if not isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
__a : Optional[int] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
__a : Optional[int] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
__a : Dict = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
__a : Optional[Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
__a : List[str] = os.path.split(_SCREAMING_SNAKE_CASE )[-1].split('.' )[0]
accelerator.init_trackers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Grab all the image filenames
__a : Optional[int] = [os.path.join(args.data_dir , _SCREAMING_SNAKE_CASE ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
__a : List[str] = [extract_label(_SCREAMING_SNAKE_CASE ) for fname in file_names]
__a : Tuple = list(set(_SCREAMING_SNAKE_CASE ) )
id_to_label.sort()
__a : Optional[Any] = {lbl: i for i, lbl in enumerate(_SCREAMING_SNAKE_CASE )}
# Set the seed before splitting the data.
np.random.seed(_SCREAMING_SNAKE_CASE )
torch.manual_seed(_SCREAMING_SNAKE_CASE )
torch.cuda.manual_seed_all(_SCREAMING_SNAKE_CASE )
# Split our filenames between train and validation
__a : int = np.random.permutation(len(_SCREAMING_SNAKE_CASE ) )
__a : str = int(0.8 * len(_SCREAMING_SNAKE_CASE ) )
__a : List[Any] = random_perm[:cut]
__a : int = random_perm[cut:]
# For training we use a simple RandomResizedCrop
__a : str = Compose([RandomResizedCrop(_SCREAMING_SNAKE_CASE , scale=(0.5, 1.0) ), ToTensor()] )
__a : Dict = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_SCREAMING_SNAKE_CASE , label_to_id=_SCREAMING_SNAKE_CASE )
# For evaluation, we use a deterministic Resize
__a : Optional[int] = Compose([Resize(_SCREAMING_SNAKE_CASE ), ToTensor()] )
__a : Tuple = PetsDataset([file_names[i] for i in eval_split] , image_transform=_SCREAMING_SNAKE_CASE , label_to_id=_SCREAMING_SNAKE_CASE )
# Instantiate dataloaders.
__a : Dict = DataLoader(_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , num_workers=4 )
__a : Optional[Any] = DataLoader(_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a : List[Any] = create_model('resnet50d' , pretrained=_SCREAMING_SNAKE_CASE , num_classes=len(_SCREAMING_SNAKE_CASE ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__a : Optional[int] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__a : str = False
for param in model.get_classifier().parameters():
__a : List[Any] = True
# We normalize the batches of images to be a bit faster.
__a : List[Any] = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
__a : Dict = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__a : Any = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
__a : Union[str, Any] = OneCycleLR(optimizer=_SCREAMING_SNAKE_CASE , max_lr=_SCREAMING_SNAKE_CASE , epochs=_SCREAMING_SNAKE_CASE , steps_per_epoch=len(_SCREAMING_SNAKE_CASE ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a : List[str] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
__a : List[Any] = 0
# We also need to keep track of the starting epoch so files are named properly
__a : Tuple = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
__a : List[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__a : Any = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__a : Optional[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__a : Optional[int] = os.path.splitext(_SCREAMING_SNAKE_CASE )[0]
if "epoch" in training_difference:
__a : Optional[int] = int(training_difference.replace('epoch_' , '' ) ) + 1
__a : str = None
else:
__a : str = int(training_difference.replace('step_' , '' ) )
__a : Union[str, Any] = resume_step // len(_SCREAMING_SNAKE_CASE )
resume_step -= starting_epoch * len(_SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
model.train()
if args.with_tracking:
__a : Dict = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__a : int = accelerator.skip_first_batches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__a : Any = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__a : int = {k: v.to(accelerator.device ) for k, v in batch.items()}
__a : Tuple = (batch['image'] - mean) / std
__a : List[str] = model(_SCREAMING_SNAKE_CASE )
__a : List[Any] = torch.nn.functional.cross_entropy(_SCREAMING_SNAKE_CASE , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Optional[int] = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__a : List[Any] = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
model.eval()
__a : str = 0
__a : List[Any] = 0
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__a : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
__a : Tuple = (batch['image'] - mean) / std
with torch.no_grad():
__a : List[Any] = model(_SCREAMING_SNAKE_CASE )
__a : str = outputs.argmax(dim=-1 )
__a , __a : Dict = accelerator.gather_for_metrics((predictions, batch['label']) )
__a : Optional[int] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__a : int = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(_SCREAMING_SNAKE_CASE ),
'epoch': epoch,
} , step=_SCREAMING_SNAKE_CASE , )
if checkpointing_steps == "epoch":
__a : str = F"""epoch_{epoch}"""
if args.output_dir is not None:
__a : Tuple = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase ():
__a : Optional[int] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=_SCREAMING_SNAKE_CASE , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
        '--mixed_precision' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=_SCREAMING_SNAKE_CASE , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
__a : Tuple = parser.parse_args()
__a : int = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
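# Hypothetical launch command (the data directory is illustrative, and the
# script is assumed to be saved as cv_example.py; all flags correspond to the
# parser defined above):
#
#   accelerate launch cv_example.py --data_dir images/ \
#       --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking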
| 27 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
def A__ ( UpperCamelCase ):
    A = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
import datasets
A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = "Intel/dpt-large"
A = pipeline("depth-estimation" , model=__UpperCamelCase )
A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
        # It is highly irregular to have no small tests.
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT" )
| 292 | 0 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCamelCase ( A__ ) -> str:
"""simple docstring"""
if not isinstance(A__ , A__ ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
    getcontext().prec = precision
UpperCamelCase = ceil(precision / 14 )
UpperCamelCase = 426_880 * Decimal(10_005 ).sqrt()
UpperCamelCase = 1
UpperCamelCase = 13_591_409
    UpperCamelCase = Decimal(linear_term )
    for k in range(1 , A__ ):
        UpperCamelCase = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = 50
    print(f'''The first {n} digits of pi are: {pi(n)}''')
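# For reference, the returned string should agree with
# pi = 3.14159265358979323846... to the requested precision; the final digit
# of the Decimal quotient is dropped to guard against rounding in the last
# place.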
| 28 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _UpperCAmelCase :
UpperCamelCase = PegasusConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = eos_token_id
A = pad_token_id
A = bos_token_id
def lowerCamelCase ( self :Tuple ):
A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A = tf.concat([input_ids, eos_tensor] , axis=1 )
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ):
A = TFPegasusModel(config=__UpperCamelCase ).get_decoder()
A = inputs_dict["input_ids"]
A = input_ids[:1, :]
A = inputs_dict["attention_mask"][:1, :]
A = inputs_dict["head_mask"]
A = 1
# first forward pass
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
A, A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
A = tf.concat([input_ids, next_tokens] , axis=-1 )
A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A = output_from_no_past[:, -3:, random_slice_idx]
A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ):
if attention_mask is None:
        A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
A = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :int ):
A = TFPegasusModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self :Any ):
A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCamelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCamelCase = '''google/pegasus-xsum'''
@cached_property
def lowerCamelCase ( self :Any ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase ( self :Dict ):
        A = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase ( self :str , **__UpperCamelCase :str ):
A = self.translate_src_text(**__UpperCamelCase )
assert self.expected_text == generated_words
def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ):
A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" )
A = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , )
A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase )
return generated_words
@slow
def lowerCamelCase ( self :Union[str, Any] ):
self._assert_generated_batch_equal_expected()
| 292 | 0 |
def lowercase__ ( string : str , separator : str = " " ):
    '''simple docstring'''
    split_words : list = []
    last_index : int = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
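# Quick self-check of the splitter above (verified against the loop logic;
# note that a trailing separator is dropped rather than yielding '').
assert lowercase__("a b c") == ["a", "b", "c"]
assert lowercase__("a,b,c", ",") == ["a", "b", "c"]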
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
A = F"https://www.amazon.in/laptop/s?k={product}"
A = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
    A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text , "html.parser" )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            A = item.h2.text
            A = "https://www.amazon.in/" + item.h2.a["href"]
A = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
A = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
A = "Not available"
try:
A = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A = ""
try:
A = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
A = float("nan" )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = " "
A = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any ) -> List[str]:
lowercase_ = '''ylacombe/bark-small'''
lowercase_ = tempfile.mkdtemp()
lowercase_ = '''en_speaker_1'''
lowercase_ = '''This is a test string'''
lowercase_ = '''speaker_embeddings_path.json'''
lowercase_ = '''speaker_embeddings'''
def _lowercase ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ = self.get_tokenizer()
lowercase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def _lowercase ( self : Dict ) -> Any:
lowercase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowercase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def _lowercase ( self : int ) -> Optional[int]:
lowercase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase_ = 3_5
lowercase_ = 2
lowercase_ = 8
lowercase_ = {
'''semantic_prompt''': np.ones(SCREAMING_SNAKE_CASE_ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowercase_ = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase_ = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowercase_ = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def _lowercase ( self : str ) -> List[Any]:
lowercase_ = self.get_tokenizer()
lowercase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowercase_ = processor(text=self.input_string )
lowercase_ = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_5_6 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def lowerCamelCase ( self :Tuple ):
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ):
A = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )
A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = len(__UpperCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__UpperCamelCase )}." )
# get prompt text embeddings
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A = text_input_ids[:, : self.tokenizer.model_max_length]
A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A, A, A = text_embeddings.shape
A = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A = 42
if negative_prompt is None:
A = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
f" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A = negative_prompt
A = text_input_ids.shape[-1]
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A = uncond_embeddings.shape[1]
A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
        # However, this currently doesn't work on `mps`.
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A = {}
if accepts_eta:
A = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A, A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = 1 / 0.18_215 * latents
A = self.vae.decode(__UpperCamelCase ).sample
A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
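# Editor's note: a self-contained sketch of the classifier-free guidance update
# performed inside the denoising loop above (guidance weight `w` from eq. (2) of
# the Imagen paper). Shapes are illustrative only.
import torch

guidance_scale = 7.5
noise_pred_uncond = torch.randn(1, 4, 64, 64)  # prediction from the "" prompt
noise_pred_text = torch.randn(1, 4, 64, 64)    # prediction from the real prompt
# guidance_scale == 1.0 reduces this to the plain conditional prediction
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)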
| 292 | 0 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[Any] ):
_UpperCAmelCase : List[Any] = logging.get_logger()
# the current default level is logging.WARNING
_UpperCAmelCase : int = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(A )
def _A ( self : int ):
_UpperCAmelCase : int = logging.get_verbosity()
_UpperCAmelCase : int = logging.get_logger("transformers.models.bart.tokenization_bart" )
_UpperCAmelCase : List[str] = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(A ) as cl:
logger.warning(A )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(A ) as cl:
logger.warning(A )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(A ) as cl:
logger.warning(A )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(A )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def _A ( self : Dict ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_UpperCAmelCase : str = logging.get_logger("transformers.models.bart.tokenization_bart" )
_UpperCAmelCase : Optional[int] = os.getenv("TRANSFORMERS_VERBOSITY" , A )
_UpperCAmelCase : Any = logging.log_levels[env_level_str]
_UpperCAmelCase : Tuple = logging.get_verbosity()
self.assertEqual(
A , A , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
_UpperCAmelCase : int = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def _A ( self : Union[str, Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_UpperCAmelCase : List[Any] = logging.logging.getLogger()
with CaptureLogger(A ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def _A ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_UpperCAmelCase : Optional[Any] = logging.get_logger("transformers.models.bart.tokenization_bart" )
_UpperCAmelCase : List[Any] = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(A ) as cl:
logger.warning_advice(A )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(A ) as cl:
logger.warning_advice(A )
self.assertEqual(cl.out , msg + "\n" )
def UpperCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
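# Editor's note: a minimal sketch of the verbosity API the tests above cover;
# every name used here also appears in the test file itself.
from transformers import logging

logging.set_verbosity_info()  # raise the library-wide level from the default WARNING
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
logger.info("visible at INFO level")
logging.set_verbosity_warning()  # restore the default level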
| 31 |
"""simple docstring"""
_snake_case : Optional[int] = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 32 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A__ ( UpperCamelCase ):
A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def A__ ( UpperCamelCase ):
A = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
A = s_dict.pop(UpperCamelCase )
elif "subsample" in key:
A = s_dict.pop(UpperCamelCase )
def A__ ( UpperCamelCase ):
A, A = emb.weight.shape
A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A = emb.weight.data
return lin_layer
def A__ ( UpperCamelCase , UpperCamelCase ):
A = torch.load(UpperCamelCase , map_location="cpu" )
A = mam_aaa["args"]
A = mam_aaa["model"]
A = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(UpperCamelCase )
rename_keys(UpperCamelCase )
A = state_dict["decoder.embed_tokens.weight"].shape[0]
A = args.share_decoder_input_output_embed
A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )]
A = SpeechaTextConfig(
vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , )
A = SpeechaTextForConditionalGeneration(UpperCamelCase )
A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
A = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A = lm_head_weights
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case : str = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
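# Editor's note: the script above is meant to be run from the command line; the
# two flags are defined in the argparse block. The file name is assumed:
#
#     python convert_s2t_fairseq_to_transformers.py \
#         --fairseq_path /path/to/checkpoint.pt \
#         --pytorch_dump_folder_path ./s2t-converted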
| 292 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Dict , A : List[Any] , A : Tuple=7 , A : Dict=3 , A : Optional[Any]=18 , A : Union[str, Any]=30 , A : List[str]=4_00 , A : List[Any]=True , A : Union[str, Any]=32 , A : Any=True , ) -> List[Any]:
lowercase_ : Optional[Any] = parent
lowercase_ : Optional[int] = batch_size
lowercase_ : Any = num_channels
lowercase_ : List[str] = image_size
lowercase_ : Optional[int] = min_resolution
lowercase_ : Dict = max_resolution
lowercase_ : str = do_resize
lowercase_ : Optional[Any] = size_divisor
lowercase_ : List[Any] = do_rescale
def A ( self : Dict ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class _UpperCAmelCase ( _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = GLPNImageProcessor if is_vision_available() else None
def A ( self : Union[str, Any] ) -> List[str]:
lowercase_ : Any = GLPNImageProcessingTester(self )
@property
def A ( self : int ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[str] ) -> int:
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size_divisor''' ) )
self.assertTrue(hasattr(A , '''resample''' ) )
self.assertTrue(hasattr(A , '''do_rescale''' ) )
def A ( self : Any ) -> str:
pass
def A ( self : Tuple ) -> Tuple:
# Initialize image_processing
lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase_ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def A ( self : List[Any] ) -> Tuple:
# Initialize image_processing
lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def A ( self : Optional[int] ) -> List[Any]:
# Initialize image_processing
lowercase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
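# Editor's note: the divisibility assertions above reduce to rounding each
# spatial dimension to a multiple of `size_divisor`. The exact resize rule is an
# assumption here (floor rounding), but the arithmetic being tested looks like:
def _round_down_to_multiple(x: int, divisor: int) -> int:
    return (x // divisor) * divisor


assert _round_down_to_multiple(400, 32) == 384  # and 384 % 32 == 0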
| 33 |
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid integers p**q * q**p (p, q distinct primes) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
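# Editor's note: the two-pointer search above works in log space. Taking log2 of
# the defining inequality, p**q * q**p <= base**degree becomes
# q*log2(p) + p*log2(q) <= degree*log2(base), which avoids astronomically large
# integers. A tiny sanity check of the equivalence:
from math import log2

p, q, bound = 2, 3, 800
assert (p**q * q**p <= bound) == (q * log2(p) + p * log2(q) <= log2(bound))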
| 292 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
A =logging.get_logger(__name__) # pylint: disable=invalid-name
A ='\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h: int, w: int, scale_factor: int = 8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class _a ( __a ):
def __init__( self : List[Any] , lowercase : MultilingualCLIP , lowercase : XLMRobertaTokenizer , lowercase : UNetaDConditionModel , lowercase : Union[DDIMScheduler, DDPMScheduler] , lowercase : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , movq=lowercase , )
UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def A ( self : int , lowercase : List[Any] , lowercase : int , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
if latents is None:
UpperCAmelCase = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase = latents.to(lowercase )
UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : int , lowercase : str , lowercase : Dict=None , ):
'''simple docstring'''
UpperCAmelCase = len(lowercase ) if isinstance(lowercase , lowercase ) else 1
# get prompt text embeddings
UpperCAmelCase = self.tokenizer(
lowercase , padding='''max_length''' , truncation=lowercase , max_length=77 , return_attention_mask=lowercase , add_special_tokens=lowercase , return_tensors='''pt''' , )
UpperCAmelCase = text_inputs.input_ids
UpperCAmelCase = self.tokenizer(lowercase , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowercase , lowercase ):
UpperCAmelCase = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase = text_input_ids.to(lowercase )
UpperCAmelCase = text_inputs.attention_mask.to(lowercase )
UpperCAmelCase , UpperCAmelCase = self.text_encoder(
input_ids=lowercase , attention_mask=lowercase )
UpperCAmelCase = prompt_embeds.repeat_interleave(lowercase , dim=0 )
UpperCAmelCase = text_encoder_hidden_states.repeat_interleave(lowercase , dim=0 )
UpperCAmelCase = text_mask.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase = 42
if negative_prompt is None:
UpperCAmelCase = [''''''] * batch_size
elif type(lowercase ) is not type(lowercase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase )} !="
f" {type(lowercase )}." )
elif isinstance(lowercase , lowercase ):
UpperCAmelCase = [negative_prompt]
elif batch_size != len(lowercase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(lowercase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
UpperCAmelCase = negative_prompt
UpperCAmelCase = self.tokenizer(
lowercase , padding='''max_length''' , max_length=77 , truncation=lowercase , return_attention_mask=lowercase , add_special_tokens=lowercase , return_tensors='''pt''' , )
UpperCAmelCase = uncond_input.input_ids.to(lowercase )
UpperCAmelCase = uncond_input.attention_mask.to(lowercase )
UpperCAmelCase , UpperCAmelCase = self.text_encoder(
input_ids=lowercase , attention_mask=lowercase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase = negative_prompt_embeds.shape[1]
UpperCAmelCase = negative_prompt_embeds.repeat(1 , lowercase )
UpperCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase )
UpperCAmelCase = uncond_text_encoder_hidden_states.shape[1]
UpperCAmelCase = uncond_text_encoder_hidden_states.repeat(1 , lowercase , 1 )
UpperCAmelCase = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , lowercase , -1 )
UpperCAmelCase = uncond_text_mask.repeat_interleave(lowercase , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCAmelCase = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCAmelCase = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def A ( self : Any , lowercase : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
def A ( self : str , lowercase : str=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
UpperCAmelCase = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase , lowercase , prev_module_hook=lowercase )
if self.safety_checker is not None:
UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(self.safety_checker , lowercase , prev_module_hook=lowercase )
# We'll offload the last model manually.
UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A ( self : str ):
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self : Optional[Any] , lowercase : Union[str, List[str]] , lowercase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase : Optional[Union[str, List[str]]] = None , lowercase : int = 512 , lowercase : int = 512 , lowercase : int = 100 , lowercase : float = 4.0 , lowercase : int = 1 , lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[str] = "pil" , lowercase : bool = True , ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
UpperCAmelCase = 1
elif isinstance(lowercase , lowercase ):
UpperCAmelCase = len(lowercase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(lowercase )}" )
UpperCAmelCase = self._execution_device
UpperCAmelCase = batch_size * num_images_per_prompt
UpperCAmelCase = guidance_scale > 1.0
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._encode_prompt(
lowercase , lowercase , lowercase , lowercase , lowercase )
if isinstance(lowercase , lowercase ):
UpperCAmelCase = torch.cat(lowercase , dim=0 )
if isinstance(lowercase , lowercase ):
UpperCAmelCase = torch.cat(lowercase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase = image_embeds.repeat_interleave(lowercase , dim=0 )
UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase , dim=0 )
UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowercase )
self.scheduler.set_timesteps(lowercase , device=lowercase )
UpperCAmelCase = self.scheduler.timesteps
UpperCAmelCase = self.unet.config.in_channels
UpperCAmelCase , UpperCAmelCase = get_new_h_w(lowercase , lowercase , self.movq_scale_factor )
# create initial latent
UpperCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowercase , lowercase , lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
UpperCAmelCase = self.unet(
sample=lowercase , timestep=lowercase , encoder_hidden_states=lowercase , added_cond_kwargs=lowercase , return_dict=lowercase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(
lowercase , lowercase , lowercase , generator=lowercase , ).prev_sample
# post-processing
UpperCAmelCase = self.movq.decode(lowercase , force_not_quantize=lowercase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase = image * 0.5 + 0.5
UpperCAmelCase = image.clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase )
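# Editor's note: a standalone check of the shape rounding done by get_new_h_w
# above -- each dimension is divided by scale_factor**2, rounded up, then scaled
# by scale_factor so the output is always a multiple of the factor.
import math


def _round_up_latent(dim: int, scale_factor: int = 8) -> int:
    return math.ceil(dim / scale_factor**2) * scale_factor


assert _round_up_latent(768) == 96   # 768/64 = 12 exactly, then 12*8
assert _round_up_latent(769) == 104  # 769/64 -> ceil(12.02) = 13, then 13*8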
| 34 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_snake_case : Union[str, Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
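# Editor's note: for comparison, the same deferred-import behavior can be
# sketched with a PEP 562 module-level __getattr__. This is hypothetical -- the
# library uses the _LazyModule machinery above, not this:
#
#     def __getattr__(name):
#         if name == "EncodecModel":
#             from .modeling_encodec import EncodecModel
#             return EncodecModel
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")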
| 292 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__a = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
__a = {"facebook/blenderbot_small-90M": 512}
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
snake_case__ : Any = set()
snake_case__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : str = char
snake_case__ : Dict = set(_lowerCAmelCase )
return pairs
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
def __init__( self : str , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int="__start__" , snake_case_ : Optional[int]="__end__" , snake_case_ : str="__unk__" , snake_case_ : List[Any]="__null__" , **snake_case_ : int , ):
super().__init__(unk_token=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , pad_token=snake_case_ , **snake_case_ )
with open(snake_case_ , encoding="""utf-8""" ) as vocab_handle:
snake_case__ : Dict = json.load(snake_case_ )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
with open(snake_case_ , encoding="""utf-8""" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("""\n""" )[1:-1]
snake_case__ : Tuple = [tuple(merge.split() ) for merge in merges]
snake_case__ : Any = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
snake_case__ : Dict = {}
@property
def lowerCamelCase ( self : str ):
return len(self.encoder )
def lowerCamelCase ( self : List[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase ( self : Dict , snake_case_ : str ):
if token in self.cache:
return self.cache[token]
snake_case__ : Tuple = re.sub("""([.,!?()])""" , r""" \1""" , snake_case_ )
snake_case__ : Optional[int] = re.sub("""(')""" , r""" \1 """ , snake_case_ )
snake_case__ : int = re.sub(r"""\s{2,}""" , """ """ , snake_case_ )
if "\n" in token:
snake_case__ : int = token.replace("""\n""" , """ __newln__""" )
snake_case__ : Dict = token.split(""" """ )
snake_case__ : Union[str, Any] = []
for token in tokens:
if not len(snake_case_ ):
continue
snake_case__ : List[str] = token.lower()
snake_case__ : Dict = tuple(snake_case_ )
snake_case__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
snake_case__ : int = get_pairs(snake_case_ )
if not pairs:
words.append(snake_case_ )
continue
while True:
snake_case__ : Tuple = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ , snake_case__ : List[Any] = bigram
snake_case__ : str = []
snake_case__ : Tuple = 0
while i < len(snake_case_ ):
try:
snake_case__ : Union[str, Any] = word.index(snake_case_ , snake_case_ )
new_word.extend(word[i:j] )
snake_case__ : List[Any] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : int = tuple(snake_case_ )
snake_case__ : Optional[Any] = new_word
if len(snake_case_ ) == 1:
break
else:
snake_case__ : str = get_pairs(snake_case_ )
snake_case__ : Optional[int] = """@@ """.join(snake_case_ )
snake_case__ : Any = word[:-4]
snake_case__ : str = word
words.append(snake_case_ )
return " ".join(snake_case_ )
def lowerCamelCase ( self : Tuple , snake_case_ : str ):
snake_case__ : Tuple = []
snake_case__ : str = re.findall(r"""\S+\n?""" , snake_case_ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case_ ).split(""" """ ) ) )
return split_tokens
def lowerCamelCase ( self : Optional[Any] , snake_case_ : str ):
snake_case__ : Dict = token.lower()
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase ( self : int , snake_case_ : int ):
return self.decoder.get(snake_case_ , self.unk_token )
def lowerCamelCase ( self : Any , snake_case_ : List[str] ):
snake_case__ : List[str] = """ """.join(snake_case_ ).replace("""@@ """ , """""" ).strip()
return out_string
def lowerCamelCase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case__ : Union[str, Any] = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[Any] = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + """\n""" )
snake_case__ : Optional[Any] = 0
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
snake_case__ : int = token_index
writer.write(""" """.join(snake_case_ ) + """\n""" )
index += 1
return vocab_file, merge_file
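# Editor's note: a compact, self-contained sketch of the BPE merge loop
# implemented in the tokenizer above: repeatedly fuse the adjacent symbol pair
# with the lowest merge rank until no ranked pair remains. Names are illustrative.
def _bpe_merge(word: tuple, ranks: dict) -> tuple:
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if best not in ranks:
            break  # no learned merge applies anymore
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word


assert _bpe_merge(("h", "e", "l", "l", "o"), {("l", "l"): 0}) == ("h", "e", "ll", "o")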
| 35 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : int = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''marian'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = vocab_size
A = decoder_vocab_size or vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class _UpperCAmelCase ( lowercase_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A = {0: "batch", 1: "decoder_sequence"}
A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
else:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(__UpperCamelCase , self ).outputs
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
A = common_inputs["decoder_input_ids"].shape[1]
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A, A = self.num_layers
A = min(__UpperCamelCase , __UpperCamelCase )
A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A = seqlen + 2
A, A = self.num_layers
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs["attention_mask"].dtype
A = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
A = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
A = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def lowerCamelCase ( self :List[str] ):
return 1e-4
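# Editor's note: a hedged sketch of how a seq2seq OnnxConfig like the one above
# is typically exercised -- generate_dummy_inputs builds the sample tensors an
# ONNX tracer needs. Class and entry-point names follow upstream transformers
# and are assumptions relative to this dump:
#
#     from transformers import MarianConfig, MarianTokenizer
#
#     onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
#     tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     dummy_inputs = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )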
| 292 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = BartphoTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : str = ["▁This", "▁is", "▁a", "▁t", "est"]
_lowerCAmelCase : List[str] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n")
_lowerCAmelCase : Optional[Any] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self, **__a):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "This is a là test"
_lowerCAmelCase : Optional[int] = "This is a<unk><unk> test"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map)
_lowerCAmelCase : List[Any] = "This is a là test"
_lowerCAmelCase : str = "▁This ▁is ▁a ▁l à ▁t est".split()
_lowerCAmelCase : str = tokenizer.tokenize(__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), __a)
| 36 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    # color[v] becomes 0 or 1 once v is visited; adjacent vertices must differ
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
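# A second check on a non-bipartite graph: any odd cycle (here a triangle)
# makes a 2-coloring impossible.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False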
| 292 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 37 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the distribution."""
        return self.variance.sqrt()
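# Note (added for clarity): the three properties above are the standard affine
# moment identities: for Y = scale * X + loc, E[Y] = scale * E[X] + loc,
# Var(Y) = scale**2 * Var(X), and Std(Y) = sqrt(Var(Y)).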
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None):
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
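# Note (added for clarity): squareplus(x) = (x + sqrt(x**2 + 4)) / 2 maps any real
# input to a strictly positive value (squareplus(0) == 1.0, and squareplus(x) -> x
# as x grows), so the subclasses below use it to constrain scale, df, and count
# parameters produced by an unconstrained network head.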
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None):
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 292 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read an environment variable and interpret it as a boolean flag."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
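# Example (illustrative, not part of the original file): running the suite with
# `RUN_SLOW=yes pytest tests/` makes parse_flag_from_env("RUN_SLOW", default=False)
# truthy via strtobool, so tests wrapped in the `slow` decorator further down stop
# being skipped; unset keys simply fall back to `default`.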
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip the test unless the given spaCy model can be loaded."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Apply every decorator in `decorators` to each `test*` method of a class."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
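# Usage sketch (illustrative, not part of the original file): code under test that
# touches the network inside the block fails fast instead of hanging.
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       some_download()  # expected to raise requests.ConnectionError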
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng_a, rng_b):
    """Compare two numpy generators by drawing the same sample from deep copies of each."""
    return deepcopy(rng_a).integers(0, 100, 10).tolist() == deepcopy(rng_b).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
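# Usage sketch (illustrative, not part of the original file): run a child process
# and capture both streams while they are echoed live.
#
#   result = execute_subprocess_async(["python", "-c", "print('hello')"])
#   assert result.returncode == 0 and result.stdout == ["hello"]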
| 38 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 292 | 0 |
class Node:
    """A binary search tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal that appends node values to res."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort a list by inserting its values into a BST and reading them back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
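# Note (added for clarity): tree sort is O(n log n) on average but O(n**2) on
# already-sorted input, since the unbalanced BST degenerates into a linked list;
# insert() above also silently drops duplicate values.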
| 39 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # The three common-mixin checks below do not apply to RoFormer and are skipped;
    # these method names are reconstructed and may differ from the upstream file.
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 292 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Tuple = ["""pixel_values"""]
def __init__( self : Optional[Any] , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Dict[str, int]] = None , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : Union[int, float] = 1 / 255 , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , **__UpperCAmelCase : Union[str, Any] , ):
super().__init__(**__UpperCAmelCase)
a : Any = size if size is not None else {"shortest_edge": 256}
a : Union[str, Any] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase)
a : List[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
a : Dict = get_size_dict(__UpperCAmelCase)
a : List[Any] = do_resize
a : Dict = size
a : str = resample
a : Tuple = do_center_crop
a : int = crop_size
a : Dict = do_rescale
a : List[str] = rescale_factor
a : Optional[Any] = do_normalize
a : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self : Dict , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : List[str] , ):
a : Optional[int] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase)
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
a : int = get_resize_output_image_size(__UpperCAmelCase , size=size["shortest_edge"] , default_to_square=__UpperCAmelCase)
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : Dict , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Union[str, Any] , ):
a : Dict = get_size_dict(__UpperCAmelCase)
return center_crop(__UpperCAmelCase , size=(size["height"], size["width"]) , data_format=__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : float , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : List[str]):
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : Any , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Optional[Any] , ):
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : List[Any] , __UpperCAmelCase : ImageInput , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[float] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCAmelCase : List[str] , ):
a : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
a : Any = size if size is not None else self.size
a : Dict = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase)
a : List[Any] = resample if resample is not None else self.resample
a : int = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Optional[Any] = get_size_dict(__UpperCAmelCase)
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
a : Any = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Tuple = make_list_of_images(__UpperCAmelCase)
if not valid_images(__UpperCAmelCase):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
a : int = [to_numpy_array(__UpperCAmelCase) for image in images]
if do_resize:
a : Optional[int] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase) for image in images]
if do_center_crop:
a : List[Any] = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase) for image in images]
if do_rescale:
a : List[Any] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase) for image in images]
if do_normalize:
a : Union[str, Any] = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase) for image in images]
a : Any = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase) for image in images]
a : List[Any] = {"pixel_values": images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase)
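# Note (added for clarity): the final method above applies the steps in a fixed
# order: resize (shortest edge -> 256) -> center crop (224x224) -> rescale (1/255)
# -> normalize (ImageNet mean/std), then returns channels-first arrays wrapped in
# a BatchFeature.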
| 40 |
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
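# Examples (illustrative): snake_to_camel_case("some_random_string") returns
# "someRandomString", and snake_to_camel_case("some_random_string", use_pascal=True)
# returns "SomeRandomString".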
| 292 | 0 |
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(10**6))
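# Quick check (illustrative): sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].
# The segmented approach only ever materializes O(sqrt(n)) booleans in `temp` at a
# time, unlike a plain sieve which needs an n-sized array.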
| 41 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
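# Worked example (added for clarity): with height = width = 512 and scale_factor = 8,
# 512 // 8**2 = 8 with no remainder, so the function returns (64, 64); a 520-pixel
# side has a remainder, so it rounds up to (8 + 1) * 8 = 72, ensuring the latent
# grid always covers the whole image.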
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ):
super().__init__()
self.register_modules(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ):
if latents is None:
A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
A = latents.to(__UpperCamelCase )
A = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
A = torch.device(f"cuda:{gpu_id}" )
A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
A = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A = None
for cpu_offloaded_model in [self.unet, self.movq]:
A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase ( self :str ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ):
A = self._execution_device
A = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
A = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = hint.repeat_interleave(__UpperCamelCase , dim=0 )
A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
A = self.scheduler.timesteps
A = self.movq.config.latent_channels
A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
# create initial latent
A = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = {"image_embeds": image_embeds, "hint": hint}
A = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
A, A = noise_pred.split(latents.shape[1] , dim=1 )
A, A = noise_pred.chunk(2 )
A, A = variance_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A, A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
A = image * 0.5 + 0.5
A = image.clamp(0 , 1 )
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
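# Note (added for clarity): the denoising loop above implements classifier-free
# guidance: the batch is doubled (negative + positive image embeds), the two noise
# predictions are split back apart, and the guided prediction is
#   noise_uncond + guidance_scale * (noise_text - noise_uncond),
# which extrapolates away from the unconditional sample as guidance_scale grows.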
| 292 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
__lowercase = GPTSanJapaneseTokenizer
__lowercase = False
__lowercase = {"""do_clean_text""": False, """add_prefix_space""": False}
def lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
_snake_case = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
_snake_case = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
_snake_case = {'unk_token': '<unk>'}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(lowerCAmelCase_ ) )
def lowerCamelCase ( self , **lowerCAmelCase_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
_snake_case = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case = self.get_input_output_texts(lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
return text, ids
def lowerCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizer()
# Testing tokenization
_snake_case = 'こんにちは、世界。 こんばんは、㔺界。'
_snake_case = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids without special tokens
_snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_snake_case = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids with special tokens
_snake_case = tokens + [tokenizer.unk_token]
_snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_snake_case = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizer()
# Testing tokenization
_snake_case = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
_snake_case = 'こんにちは、、、、世界。こんばんは、、、、世界。'
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
_snake_case = 'こんにちは、世界。'
_snake_case = 'こんばんは、㔺界。😀'
_snake_case = 'こんにちは、世界。こんばんは、世界。😀'
_snake_case = tokenizer.encode(prefix_text + input_text )
_snake_case = tokenizer.encode('' , prefix_text=prefix_text + input_text )
_snake_case = tokenizer.encode(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ )
_snake_case = tokenizer.decode(lowerCAmelCase_ )
_snake_case = tokenizer.decode(lowerCAmelCase_ )
_snake_case = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
_snake_case = 'こんにちは、世界。'
_snake_case = 'こんばんは、㔺界。😀'
_snake_case = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2
_snake_case = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2
_snake_case = [1] + [0] * (len_prefix + len_text + 1)
_snake_case = [1] * (len_prefix + len_text + 1) + [0]
_snake_case = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_snake_case = tokenizer(prefix_text + input_text ).token_type_ids
_snake_case = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
_snake_case = tokenizer(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ ).token_type_ids
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
_snake_case = tokenizer.encode('あンいワ' )
_snake_case = tokenizer.encode('' , prefix_text='あンいワ' )
_snake_case = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(lowerCAmelCase_ ) , tokenizer.decode(lowerCAmelCase_ ) )
self.assertEqual(tokenizer.decode(lowerCAmelCase_ ) , tokenizer.decode(lowerCAmelCase_ ) )
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
_snake_case = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_snake_case = tokenizer.batch_encode_plus(lowerCAmelCase_ , padding=lowerCAmelCase_ )
# fmt: off
_snake_case = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
_snake_case = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_snake_case = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowerCAmelCase_ )
self.assertListEqual(x_token.token_type_ids , lowerCAmelCase_ )
self.assertListEqual(x_token.attention_mask , lowerCAmelCase_ )
self.assertListEqual(x_token_a.input_ids , lowerCAmelCase_ )
self.assertListEqual(x_token_a.token_type_ids , lowerCAmelCase_ )
self.assertListEqual(x_token_a.attention_mask , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
| 42 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :Optional[int] ):
A = ViTMSNModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def lowerCamelCase ( self :Union[str, Any] ):
pass
def lowerCamelCase ( self :int ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCamelCase ( self :Tuple ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase ( self :List[Any] ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ViTMSNModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A__ ( ):
A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self :Union[str, Any] ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self :Any ):
torch.manual_seed(2 )
A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
# verify the logits
A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 292 | 0 |
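# A minimal, self-contained sketch of the inference path the integration test above
# exercises. The checkpoint "facebook/vit-msn-small" and the (1, 1000) logits shape come
# straight from the test; the helper name and wrapping are ours.
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

def classify(image: Image.Image) -> int:
    processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
    model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
    model.eval()
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000)
    return int(logits.argmax(-1))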
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_validate_point(SCREAMING_SNAKE_CASE )
_validate_point(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(a - b ) for a, b in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if point:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for item in point:
if not isinstance(SCREAMING_SNAKE_CASE , (int, float) ):
__UpperCamelCase :Optional[int] = (
'''Expected a list of numbers as input, found '''
f"""{type(SCREAMING_SNAKE_CASE ).__name__}"""
)
raise TypeError(SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase :List[str] = f"""Expected a list of numbers as input, found {type(SCREAMING_SNAKE_CASE ).__name__}"""
raise TypeError(SCREAMING_SNAKE_CASE )
else:
raise ValueError('''Missing an input''' )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_validate_point(SCREAMING_SNAKE_CASE )
_validate_point(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(x - y ) for x, y in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
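# For reference, a compact restatement of the two Manhattan-distance helpers above
# (they are functionally identical); no new logic, only readable names and a doctest.
def manhattan_distance(point_a: list[float], point_b: list[float]) -> float:
    """
    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))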
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''vivit'''
def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = num_frames
A = tubelet_size
A = num_channels
A = qkv_bias
super().__init__(**__UpperCamelCase )
| 292 | 0 |
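# The class above is transformers' VivitConfig with mangled identifiers. A short usage
# sketch (attribute names and defaults read off the __init__ signature above):
from transformers import VivitConfig

config = VivitConfig(num_frames=32, tubelet_size=[2, 16, 16], hidden_act="gelu_fast")
assert config.image_size == 224  # the default from the signature above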
"""simple docstring"""
import numpy as np
class __A :
def __init__( self ):
_lowerCAmelCase : Optional[Any] = (0, 0)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = 0
def __eq__( self , a__ ):
return self.position == cell.position
def __A ( self ):
print(self.position )
class __A :
def __init__( self , a__=(5, 5) ):
_lowerCAmelCase : List[Any] = np.zeros(a__ )
_lowerCAmelCase : List[str] = world_size[0]
_lowerCAmelCase : List[str] = world_size[1]
def __A ( self ):
print(self.w )
def __A ( self , a__ ):
_lowerCAmelCase : Dict = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
_lowerCAmelCase : Optional[Any] = cell.position[0]
_lowerCAmelCase : str = cell.position[1]
_lowerCAmelCase : Any = []
for n in neighbour_cord:
_lowerCAmelCase : List[str] = current_x + n[0]
_lowerCAmelCase : List[str] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
_lowerCAmelCase : Optional[int] = Cell()
_lowerCAmelCase : List[str] = (x, y)
_lowerCAmelCase : Optional[int] = cell
neighbours.append(a__ )
return neighbours
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Any = []
_open.append(_lowerCamelCase )
while _open:
_lowerCAmelCase : List[str] = np.argmin([n.f for n in _open] )
_lowerCAmelCase : int = _open[min_f]
_closed.append(_open.pop(_lowerCamelCase ) )
if current == goal:
break
for n in world.get_neighbours(_lowerCamelCase ):
for c in _closed:
if c == n:
continue
_lowerCAmelCase : Any = current.g + 1
_lowerCAmelCase , _lowerCAmelCase : Tuple = n.position  # intended binding: (xa, ya)
_lowerCAmelCase , _lowerCAmelCase : Dict = goal.position  # intended binding: (xb, yb)
_lowerCAmelCase : Union[str, Any] = (yb - ya) ** 2 + (xb - xa) ** 2  # squared-Euclidean heuristic to the goal; the source read (ya - ya) ** 2 + (xa - xa) ** 2, which is identically zero
_lowerCAmelCase : List[Any] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_lowerCamelCase )
_lowerCAmelCase : Dict = []
while current.parent is not None:
path.append(current.position )
_lowerCAmelCase : Union[str, Any] = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
_a : Any = Gridworld()
# Start position and goal
_a : Optional[int] = Cell()
_a : Dict = (0, 0)
_a : Dict = Cell()
_a : Union[str, Any] = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
_a : List[str] = astar(world, start, goal)
# Just for visual reasons.
for i in s:
_a : Optional[Any] = 1
print(world.w)
| 44 |
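# The class-based solver above is hard to follow once its identifiers are mangled, so here
# is a minimal, runnable A* sketch over the same 5x5 open grid: heapq instead of np.argmin,
# the same unit step cost and squared-Euclidean heuristic. All names are ours.
import heapq
from itertools import count

def astar_grid(size, start, goal):
    def h(p):  # squared Euclidean distance to the goal
        return (p[0] - goal[0]) ** 2 + (p[1] - goal[1]) ** 2

    tie = count()  # tiebreaker so the heap never has to compare positions or parents
    open_heap = [(h(start), 0, next(tie), start, None)]
    parents = {}
    while open_heap:
        _, g, _, pos, parent = heapq.heappop(open_heap)
        if pos in parents:
            continue
        parents[pos] = parent
        if pos == goal:
            path = [pos]
            while parents[pos] is not None:
                pos = parents[pos]
                path.append(pos)
            return path[::-1]
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                nxt = (pos[0] + dx, pos[1] + dy)
                if nxt != pos and 0 <= nxt[0] < size[0] and 0 <= nxt[1] < size[1]:
                    heapq.heappush(open_heap, (g + 1 + h(nxt), g + 1, next(tie), nxt, pos))
    return []

print(astar_grid((5, 5), (0, 0), (4, 4)))  # e.g. [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]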
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
A = np.random.RandomState(__UpperCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :Any ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Union[str, Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self :Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self :Optional[int] ):
A = ort.SessionOptions()
A = False
return options
def lowerCamelCase ( self :Dict ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase ( self :Any ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
A = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 292 | 0 |
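# "OnnxStableDiffusionImgaImgPipeline" above is diffusers' OnnxStableDiffusionImg2ImgPipeline
# with its digits mangled. A hedged sketch of the happy path the nightly test exercises;
# the checkpoint, revision and call arguments are taken from that test and may have moved
# on the Hub since.
import numpy as np
from diffusers import OnnxStableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
)
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))
out = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type="np",
)
assert out.images.shape == (1, 512, 768, 3)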
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : tuple[int, int] , lowerCAmelCase__ : tuple[int, int] , lowerCAmelCase__ : bool , ) -> tuple[float | int, list[tuple[int, int]]]:
__a , __a = grid.shape
__a = [-1, 1, 0, 0]
__a = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__a , __a = [(0, source)], set()
__a = np.full((rows, cols) , np.inf )
__a = 0
__a = np.empty((rows, cols) , dtype=lowerCAmelCase__ )
__a = None
while queue:
__a , __a = heappop(lowerCAmelCase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__a = []
while (x, y) != source:
path.append((x, y) )
__a , __a = predecessors[x, y]
path.append(lowerCAmelCase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowerCAmelCase__ ) ):
__a , __a = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__a = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowerCAmelCase__ , (dist + 1, (nx, ny)) )
__a = dist + 1
__a = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
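# A compact, runnable restatement of the grid Dijkstra above (names ours): cells equal to 1
# are passable, every step costs 1, and the function returns (distance, path).
import heapq
import numpy as np

def grid_dijkstra(grid, source, destination):
    rows, cols = grid.shape
    dist = np.full((rows, cols), np.inf)
    dist[source] = 0
    heap, prev = [(0, source)], {source: None}
    while heap:
        d, (x, y) = heapq.heappop(heap)
        if (x, y) == destination:
            path, cur = [], (x, y)
            while cur is not None:
                path.append(cur)
                cur = prev[cur]
            return d, path[::-1]
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx, ny] == 1 and d + 1 < dist[nx, ny]:
                dist[nx, ny] = d + 1
                prev[(nx, ny)] = (x, y)
                heapq.heappush(heap, (d + 1, (nx, ny)))
    return np.inf, []

grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
print(grid_dijkstra(grid, (0, 0), (2, 0)))  # (6, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)])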
"""simple docstring"""
def A__ ( UpperCamelCase ):
A = generate_pascal_triangle(UpperCamelCase )
for row_idx in range(UpperCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = []
for current_row_idx in range(UpperCamelCase ):
A = populate_current_row(UpperCamelCase , UpperCamelCase )
triangle.append(UpperCamelCase )
return triangle
def A__ ( UpperCamelCase , UpperCamelCase ):
A = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A, A = 1, 1
for current_col_idx in range(1 , UpperCamelCase ):
calculate_current_element(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return current_row
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
A = triangle[current_row_idx - 1][current_col_idx - 1]
A = triangle[current_row_idx - 1][current_col_idx]
A = above_to_left_elt + above_to_right_elt
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = [[1]]
for row_index in range(1 , UpperCamelCase ):
A = [0] + result[-1] + [0]
A = row_index + 1
# Calculate the number of distinct elements in a row
A = sum(divmod(UpperCamelCase , 2 ) )
A = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A = row_first_half + row_second_half
result.append(UpperCamelCase )
return result
def A__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None:
A = F"{func.__name__}({value})"
A = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCamelCase , UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 292 | 0 |
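# Each entry of row n in Pascal's triangle is the binomial coefficient C(n, k), so both
# generators above can be cross-checked against math.comb (our sketch, not original code):
import math

def pascal_row(n: int) -> list[int]:
    return [math.comb(n, k) for k in range(n + 1)]

assert pascal_row(4) == [1, 4, 6, 4, 1]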
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowercase ( unittest.TestCase ):
def _snake_case ( self ) -> int:
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase = dict(zip(lowercase , range(len(lowercase ) ) ) )
lowerCAmelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
lowerCAmelCase = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase = os.path.join(self.tmpdirname , lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowercase , lowercase )
def _snake_case ( self , **lowercase ) -> Dict:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def _snake_case ( self , **lowercase ) -> List[str]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def _snake_case ( self , **lowercase ) -> int:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase )
def _snake_case ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase )
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase )
self.assertIsInstance(processor_fast.tokenizer , lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase )
self.assertIsInstance(processor_fast.image_processor , lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
lowerCAmelCase = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def _snake_case ( self ) -> int:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(lowercase , return_tensors="""np""" )
lowerCAmelCase = processor(images=lowercase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = processor(text=lowercase )
lowerCAmelCase = tokenizer(lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _snake_case ( self ) -> int:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(lowercase )
lowerCAmelCase = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _snake_case ( self ) -> str:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 46 |
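# A condensed sketch of what the processor tests above verify: CLIPProcessor simply fans
# text out to the tokenizer and images out to the image processor. The checkpoint name is
# an assumption; any CLIP checkpoint behaves the same way.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']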
"""simple docstring"""
import math
import sys
def A__ ( UpperCamelCase ):
A = ""
try:
with open(UpperCamelCase , "rb" ) as binary_file:
A = binary_file.read()
for dat in data:
A = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = {"0": "0", "1": "1"}
A, A = "", ""
A = len(UpperCamelCase )
for i in range(len(UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A = lexicon[curr_string]
result += last_match_id
A = last_match_id + "0"
if math.log2(UpperCamelCase ).is_integer():
A = {}
for curr_key in list(UpperCamelCase ):
A = lexicon.pop(UpperCamelCase )
A = new_lex
A = last_match_id + "1"
index += 1
A = ""
return result
def A__ ( UpperCamelCase , UpperCamelCase ):
A = 8
try:
with open(UpperCamelCase , "wb" ) as opened_file:
A = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCamelCase ) , UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
A = data_bits[counter:]
A = data_bits[counter + 1 :]
return data_bits
def A__ ( UpperCamelCase , UpperCamelCase ):
A = read_file_binary(UpperCamelCase )
A = remove_prefix(UpperCamelCase )
A = decompress_data(UpperCamelCase )
write_file_binary(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 292 | 0 |
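# read_file_binary above expands every byte to its zero-padded 8-bit string via the
# f"{dat:08b}" format spec; an equivalent one-liner for an in-memory bytes object:
def bytes_to_bits(data: bytes) -> str:
    return "".join(f"{byte:08b}" for byte in data)

assert bytes_to_bits(b"\x05") == "00000101"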
'''simple docstring'''
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 47 |
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ):
A = name
A = val
def __str__( self :str ):
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ):
return self.val < other.val
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ):
A = {}
A = {}
A = self.build_heap(__UpperCamelCase )
def __getitem__( self :int , __UpperCamelCase :Optional[int] ):
return self.get_value(__UpperCamelCase )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ):
return (idx - 1) // 2
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
return idx * 2 + 1
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ):
return idx * 2 + 2
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ):
return self.heap_dict[key]
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
A = len(__UpperCamelCase ) - 1
A = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
A = idx
A = i.val
for i in range(__UpperCamelCase , -1 , -1 ):
self.sift_down(__UpperCamelCase , __UpperCamelCase )
return array
def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ):
while True:
A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
A = self.get_right_child_idx(__UpperCamelCase )
A = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
A = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
A = r
if smallest != idx:
A, A = array[smallest], array[idx]
A, A = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
A = smallest
else:
break
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ):
A = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
A, A = self.heap[idx], self.heap[p]
A, A = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
A = p
A = self.get_parent_idx(__UpperCamelCase )
def lowerCamelCase ( self :Any ):
return self.heap[0]
def lowerCamelCase ( self :Tuple ):
A, A = self.heap[-1], self.heap[0]
A, A = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
A = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ):
self.heap.append(__UpperCamelCase )
A = len(self.heap ) - 1
A = node.val
self.sift_up(len(self.heap ) - 1 )
def lowerCamelCase ( self :Tuple ):
return len(self.heap ) == 0
def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
A = new_value
A = new_value
self.sift_up(self.idx_of_element[node] )
_snake_case : Optional[int] = Node('R', -1)
_snake_case : Tuple = Node('B', 6)
_snake_case : Tuple = Node('A', 3)
_snake_case : Optional[int] = Node('X', 1)
_snake_case : List[Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_snake_case : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 0 |
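# The decrease_key above works because idx_of_element locates any node in O(1). The
# standard-library heapq has no decrease-key; the usual workaround, sketched below with
# our own names, is lazy deletion: re-push with the new priority and skip stale entries.
import heapq

heap, best = [], {}

def push(name, val):
    best[name] = val
    heapq.heappush(heap, (val, name))

def pop():
    while heap:
        val, name = heapq.heappop(heap)
        if best.get(name) == val:  # ignore entries made stale by a later push
            del best[name]
            return name, val
    raise IndexError("pop from empty heap")

push("B", 6)
push("A", 3)
push("B", -17)  # "decrease key" by re-pushing
print(pop())  # ('B', -17)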
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ (metaclass=lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Tuple = ["""onnx"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
requires_backends(self , ["onnx"] )
@classmethod
def _lowercase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
requires_backends(cls , ["onnx"] )
@classmethod
def _lowercase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
requires_backends(cls , ["onnx"] )
| 48 |
"""simple docstring"""
from __future__ import annotations
_snake_case : str = []
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
for i in range(len(UpperCamelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ):
if board[i][j] == 1:
return False
return True
def A__ ( UpperCamelCase , UpperCamelCase ):
if row >= len(UpperCamelCase ):
solution.append(UpperCamelCase )
printboard(UpperCamelCase )
print()
return True
for i in range(len(UpperCamelCase ) ):
if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = 1
solve(UpperCamelCase , row + 1 )
A = 0
return False
def A__ ( UpperCamelCase ):
for i in range(len(UpperCamelCase ) ):
for j in range(len(UpperCamelCase ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
_snake_case : List[str] = 8
_snake_case : List[str] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 292 | 0 |
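# For n = 8 the backtracking solver above prints all 92 solutions. The count can be
# cross-checked with a brute-force sketch over column permutations: a permutation already
# rules out row and column clashes, so only the two diagonal directions need testing.
from itertools import permutations

def count_queens(n: int) -> int:
    return sum(
        len({r + c for r, c in enumerate(p)}) == n
        and len({r - c for r, c in enumerate(p)}) == n
        for p in permutations(range(n))
    )

assert count_queens(8) == 92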
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len(_UpperCAmelCase )
__a = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__a = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__a = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__a = subset[i - 1][j]
if arr[i - 1] <= j:
__a = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
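# A set-based restatement of the subset-sum DP above (our names), with the classic example:
# each element either extends an already-reachable sum or is skipped.
def is_subset_sum(arr: list[int], target: int) -> bool:
    reachable = {0}
    for x in arr:
        reachable |= {s + x for s in reachable if s + x <= target}
    return target in reachable

assert is_subset_sum([3, 34, 4, 12, 5, 2], 9) is True   # 4 + 5
assert is_subset_sum([3, 34, 4, 12, 5, 2], 30) is False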
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
def A__ ( UpperCamelCase ):
A = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
import datasets
A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = "Intel/dpt-large"
A = pipeline("depth-estimation" , model=__UpperCamelCase )
A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 292 | 0 |
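# Condensed from the slow test above: the depth-estimation pipeline returns both the raw
# predicted_depth tensor and a rendered PIL depth map.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(type(out["depth"]))            # <class 'PIL.Image.Image'>
print(out["predicted_depth"].shape)  # torch.Size([1, H, W])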
from math import pow
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> tuple[int, int]:
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
lowerCamelCase__ : Union[str, Any] = int(pow(_UpperCAmelCase , _UpperCAmelCase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = backtrack(
_UpperCAmelCase , _UpperCAmelCase , current_number + 1 , _UpperCAmelCase , _UpperCAmelCase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
lowerCamelCase__ , lowerCamelCase__ : str = backtrack(
_UpperCAmelCase , _UpperCAmelCase , current_number + 1 , _UpperCAmelCase , _UpperCAmelCase )
return current_sum, solutions_count
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
'Invalid input\n'
'needed_sum must be between 1 and 1000, power between 2 and 10.' )
return backtrack(_UpperCAmelCase , _UpperCAmelCase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 |
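# The pair above counts the ways to write needed_sum as a sum of distinct power-th powers
# of natural numbers. A brute-force cross-check over subsets of candidate bases (our
# sketch; e.g. 100 = 10^2 = 6^2 + 8^2 = 1^2 + 3^2 + 4^2 + 5^2 + 7^2 gives three ways):
from itertools import combinations

def count_power_sums(needed_sum: int, power: int) -> int:
    bases = range(1, int(needed_sum ** (1 / power)) + 1)
    return sum(
        sum(b ** power for b in combo) == needed_sum
        for r in range(1, len(bases) + 1)
        for combo in combinations(bases, r)
    )

assert count_power_sums(13, 2) == 1   # 2^2 + 3^2
assert count_power_sums(100, 2) == 3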
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _UpperCAmelCase :
UpperCamelCase = PegasusConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = eos_token_id
A = pad_token_id
A = bos_token_id
def lowerCamelCase ( self :Tuple ):
A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A = tf.concat([input_ids, eos_tensor] , axis=1 )
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ):
A = TFPegasusModel(config=__UpperCamelCase ).get_decoder()
A = inputs_dict["input_ids"]
A = input_ids[:1, :]
A = inputs_dict["attention_mask"][:1, :]
A = inputs_dict["head_mask"]
A = 1
# first forward pass
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
A, A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) , config.vocab_size )
A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A = tf.concat([input_ids, next_tokens] , axis=-1 )
A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A = output_from_no_past[:, -3:, random_slice_idx]
A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ):
if attention_mask is None:
A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :int ):
A = TFPegasusModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self :Any ):
A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCamelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCamelCase = '''google/pegasus-xsum'''
@cached_property
def lowerCamelCase ( self :Any ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase ( self :Dict ):
A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase ( self :str , **__UpperCamelCase :str ):
A = self.translate_src_text(**__UpperCamelCase )
assert self.expected_text == generated_words
def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ):
A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" )
A = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , )
A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase )
return generated_words
@slow
def lowerCamelCase ( self :Union[str, Any] ):
self._assert_generated_batch_equal_expected()
| 292 | 0 |
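# The integration test above (note "TFAutoModelForSeqaSeqLM" is the mangled spelling of
# TFAutoModelForSeq2SeqLM) condenses to this usage pattern; the checkpoint and decoding
# arguments are taken from the test itself.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
batch = tokenizer(["PG&E stated it scheduled the blackouts ..."], padding=True, return_tensors="tf")
ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(ids, skip_special_tokens=True))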
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
snake_case_ : List[str] = 8
def A (__A : Union[str, Any] , __A : List[Any]=BITS ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = x.device
UpperCAmelCase_ = (x * 255).int().clamp(0 , 255 )
UpperCAmelCase_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__A )
UpperCAmelCase_ = rearrange(__A , '''d -> d 1 1''' )
UpperCAmelCase_ = rearrange(__A , '''b c h w -> b c 1 h w''' )
UpperCAmelCase_ = ((x & mask) != 0).float()
UpperCAmelCase_ = rearrange(__A , '''b c d h w -> b (c d) h w''' )
UpperCAmelCase_ = bits * 2 - 1
return bits
def A (__A : Dict , __A : Tuple=BITS ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = x.device
UpperCAmelCase_ = (x > 0).int()
UpperCAmelCase_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__A , dtype=torch.int32 )  # "torch.intaa" in the source is a mangled dtype; int32 (as in the original bit-diffusion helper) is an assumption
UpperCAmelCase_ = rearrange(__A , '''d -> d 1 1''' )
UpperCAmelCase_ = rearrange(__A , '''b (c d) h w -> b c d h w''' , d=8 )
UpperCAmelCase_ = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 255).clamp(0.0 , 1.0 )
def A (self : List[Any] , __A : torch.FloatTensor , __A : int , __A : torch.FloatTensor , __A : float = 0.0 , __A : bool = True , __A : Tuple=None , __A : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
UpperCAmelCase_ = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
UpperCAmelCase_ = self.alphas_cumprod[timestep]
UpperCAmelCase_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
UpperCAmelCase_ = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
UpperCAmelCase_ = self.bit_scale
if self.config.clip_sample:
UpperCAmelCase_ = torch.clamp(__A , -scale , __A )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
UpperCAmelCase_ = self._get_variance(__A , __A )
UpperCAmelCase_ = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
UpperCAmelCase_ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase_ = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase_ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
UpperCAmelCase_ = model_output.device if torch.is_tensor(__A ) else '''cpu'''
UpperCAmelCase_ = torch.randn(model_output.shape , dtype=model_output.dtype , generator=__A ).to(__A )
UpperCAmelCase_ = self._get_variance(__A , __A ) ** 0.5 * eta * noise
UpperCAmelCase_ = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
def A (self : Optional[int] , __A : torch.FloatTensor , __A : int , __A : torch.FloatTensor , __A : int="epsilon" , __A : Optional[Any]=None , __A : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
UpperCAmelCase_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
UpperCAmelCase_ , UpperCAmelCase_ = torch.split(__A , sample.shape[1] , dim=1 )
else:
UpperCAmelCase_ = None
# 1. compute alphas, betas
UpperCAmelCase_ = self.alphas_cumprod[t]
UpperCAmelCase_ = self.alphas_cumprod[t - 1] if t > 0 else self.one
UpperCAmelCase_ = 1 - alpha_prod_t
UpperCAmelCase_ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
UpperCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
UpperCAmelCase_ = self.bit_scale
if self.config.clip_sample:
UpperCAmelCase_ = torch.clamp(__A , -scale , __A )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase_ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
UpperCAmelCase_ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCAmelCase_ = 0
if t > 0:
UpperCAmelCase_ = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__A ).to(model_output.device )
UpperCAmelCase_ = (self._get_variance(__A , predicted_variance=__A ) ** 0.5) * noise
UpperCAmelCase_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
class __snake_case ( a ):
def __init__( self : Union[str, Any] , _snake_case : UNetaDConditionModel , _snake_case : Union[DDIMScheduler, DDPMScheduler] , _snake_case : Optional[float] = 1.0 , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ = bit_scale
UpperCAmelCase_ = (
ddim_bit_scheduler_step if isinstance(_snake_case , _snake_case) else ddpm_bit_scheduler_step
)
self.register_modules(unet=_snake_case , scheduler=_snake_case)
@torch.no_grad()
def __call__( self : Union[str, Any] , _snake_case : Optional[int] = 256 , _snake_case : Optional[int] = 256 , _snake_case : Optional[int] = 50 , _snake_case : Optional[torch.Generator] = None , _snake_case : Optional[int] = 1 , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , **_snake_case : Optional[Any] , ):
"""simple docstring"""
UpperCAmelCase_ = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=_snake_case , )
UpperCAmelCase_ = decimal_to_bits(_snake_case) * self.bit_scale
UpperCAmelCase_ = latents.to(self.device)
self.scheduler.set_timesteps(_snake_case)
for t in self.progress_bar(self.scheduler.timesteps):
# predict the noise residual
UpperCAmelCase_ = self.unet(_snake_case , _snake_case).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
UpperCAmelCase_ = bits_to_decimal(_snake_case)
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(_snake_case)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case)
| 51 |
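# decimal_to_bits / bits_to_decimal above ("UNetaDConditionModel" is the mangled spelling
# of UNet2DConditionModel) map images in [0, 1] to {-1, +1} bit planes and back. A
# self-contained round-trip check of the same idea, with our names and plain torch ops in
# place of einops.rearrange:
import torch

def to_bits(x: torch.Tensor, bits: int = 8) -> torch.Tensor:
    v = (x * 255).int().clamp(0, 255)                       # (b, c, h, w) in 0..255
    mask = 2 ** torch.arange(bits - 1, -1, -1)              # MSB first
    planes = ((v.unsqueeze(2) & mask.view(1, 1, -1, 1, 1)) != 0).float()
    return planes.flatten(1, 2) * 2 - 1                     # (b, c*bits, h, w) in {-1, +1}

def from_bits(b: torch.Tensor, bits: int = 8) -> torch.Tensor:
    n, cbits, h, w = b.shape
    planes = (b > 0).int().view(n, cbits // bits, bits, h, w)
    mask = 2 ** torch.arange(bits - 1, -1, -1)
    return ((planes * mask.view(1, 1, -1, 1, 1)).sum(2) / 255).clamp(0.0, 1.0)

x = torch.rand(1, 3, 4, 4)
assert torch.allclose(from_bits(to_bits(x)), (x * 255).int() / 255, atol=1e-6)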
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
A = F"https://www.amazon.in/laptop/s?k={product}"
A = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
A = item.ha.text
A = "https://www.amazon.in/" + item.ha.a["href"]
A = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
A = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
A = "Not available"
try:
A = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A = ""
try:
A = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
A = float("nan" )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = " "
A = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
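# The scraper above follows the usual requests + BeautifulSoup pattern. A generic skeleton
# of that pattern; the "h2" selector is a placeholder, not Amazon's real markup, which
# changes often and is the usual reason such scrapers break:
import requests
from bs4 import BeautifulSoup

def scrape_titles(url: str, headers: dict) -> list[str]:
    soup = BeautifulSoup(requests.get(url, headers=headers, timeout=10).text, "html.parser")
    return [tag.get_text(strip=True) for tag in soup.find_all("h2")]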
def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
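A minimal usage sketch (assuming the functions defined above are in scope); the divide-and-conquer version runs in O(n log n) versus O(n^2) for the brute force:

example = [3, 1, 2]
assert count_inversions_bf(example) == 2  # inversions: (3, 1), (3, 2)
_, fast_count = count_inversions_recursive(example)
assert fast_count == 2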
| 52 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
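A hedged usage sketch for wiring the pipeline up (the model ids and the `custom_pipeline` name below are assumptions; adjust them to your setup):

# from transformers import WhisperForConditionalGeneration, WhisperProcessor
# from diffusers import DiffusionPipeline
#
# speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5",
#     custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline name
#     speech_model=speech_model,
#     speech_processor=speech_processor,
# )
# image = pipe(audio, sampling_rate=16_000).images[0]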
| 292 | 0 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"
class DonutProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)
    def test_token2json(self):
        expected_json = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
        sequence = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
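For intuition, a toy re-implementation of the tag-to-JSON idea being tested (illustrative only; the real DonutProcessor.token2json also handles nesting, <sep/> separators, and special tokens):

import re

def toy_token2json(tokens: str) -> dict:
    # parse flat <s_key>value</s_key> pairs; no nesting or <sep/> handling
    return {m.group(1): m.group(2) for m in re.finditer(r"<s_(\w+)>(.*?)</s_\1>", tokens)}

assert toy_token2json("<s_name>John Doe</s_name><s_age>99</s_age>") == {"name": "John Doe", "age": "99"}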
| 53 |
"""simple docstring"""
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    '''simple docstring'''
    config = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def rename_key(name, base_model=False):
    '''simple docstring'''
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    '''simple docstring'''
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path)
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        model_mapping = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 54 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 292 | 0 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """simple docstring"""
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 55 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
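The two-pointer scan counts prime pairs (p, q), p < q, with p^q * q^p <= base^degree, comparing in log space so the huge powers are never materialized. A small brute-force cross-check sketch for a much smaller bound (assumes the two functions above are in scope):

from math import log2

def brute_force(base: int = 10, degree: int = 10) -> int:
    bound = degree * log2(base)
    primes = calculate_prime_numbers(int(bound))
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if q * log2(p) + p * log2(q) <= bound
    )

assert brute_force(10, 10) == solution(10, 10)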
if __name__ == "__main__":
print(F"""{solution() = }""")
| 292 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
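Typical usage via the high-level `pipeline` API (the checkpoint id is one commonly used for this task; any CLAP-style zero-shot audio model should work):

# from transformers import pipeline
#
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# output = classifier("audio.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# -> [{"score": ..., "label": "Sound of a dog"}, ...]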
| 56 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 292 | 0 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    '''simple docstring'''

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    '''simple docstring'''
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCamelCase )
print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 57 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 292 | 0 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895"""))
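A few extra spot checks of the regex (plain asserts; run them after the definition above):

assert indian_phone_validator("+918827897895")
assert indian_phone_validator("9876543210")     # bare 10-digit number with a valid leading digit
assert not indian_phone_validator("123456789")  # too short and bad leading digit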
| 58 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
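For contrast, a graph containing an odd cycle is not bipartite; a quick check with the same function:

triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False: a 3-cycle cannot be 2-colored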
| 292 | 0 |
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F'{solution() = }')
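This is Project Euler problem 44 (the widely cited answer for the default limit is D = 5482660). A quick spot check of the helper, assuming the definitions above:

assert is_pentagonal(22)       # P_4 = 22
assert not is_pentagonal(23)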
| 59 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
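A hedged usage sketch of the projection/distribution flow above (the feature size and shapes are made up for illustration):

import torch

output = StudentTOutput(dim=1)
proj = output.get_parameter_projection(in_features=32)  # maps hidden states to (df, loc, scale)
hidden = torch.randn(4, 32)
distr_args = proj(hidden)
distr = output.distribution(distr_args)
sample = distr.sample()  # one draw per batch element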
| 292 | 0 |
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int ):
# convert to numpy arrays
lowerCAmelCase : List[str] = np.array(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = np.array(UpperCamelCase_ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
lowerCAmelCase : List[Any] = X - np.mean(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
lowerCAmelCase : Dict = np.linalg.inv(UpperCamelCase_ )
except np.linalg.LinAlgError:
lowerCAmelCase : List[str] = np.linalg.pinv(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = np.dot(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = np.dot(UpperCamelCase_ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 60 |
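For readability, here is a de-obfuscated sketch of the same Mahalanobis computation. It centres each row of X on the per-feature mean of the reference distribution (an assumption, matching the standard definition) and reproduces the docstring example:

import numpy as np

def mahalanobis(X: np.ndarray, reference: np.ndarray) -> np.ndarray:
    # Squared Mahalanobis distance d^2 = (x - mu)^T S^-1 (x - mu) per row of X,
    # falling back to the pseudo-inverse when the covariance is singular.
    x_minus_mu = X - reference.mean(axis=0)
    cov = np.cov(reference.T)
    try:
        inv_cov = np.linalg.inv(cov)
    except np.linalg.LinAlgError:
        inv_cov = np.linalg.pinv(cov)
    return (x_minus_mu @ inv_cov @ x_minus_mu.T).diagonal()

print(mahalanobis(np.array([[0, 1]]), np.array([[0, 1], [1, 0]])))  # [0.5]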
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
UpperCamelCase = None
def lowerCamelCase ( self :List[Any] ):
A = self.feature_extraction_class(**self.feat_extract_dict )
A = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
A = self.feature_extraction_class.from_json_file(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
A = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Tuple ):
A = self.feature_extraction_class()
self.assertIsNotNone(__UpperCamelCase )
| 292 | 0 |
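The mixin above verifies a JSON round-trip for feature-extractor configs. A hypothetical usage with a concrete extractor (the class and kwargs are illustrative, not taken from these tests):

from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000)
extractor.to_json_file("feat_extract.json")           # serialise the config
reloaded = Wav2Vec2FeatureExtractor.from_json_file("feat_extract.json")
assert reloaded.to_dict() == extractor.to_dict()      # lossless round-trip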
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __a ( __lowerCamelCase ):
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = analyze_text(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = list(" " + ascii_lowercase )
# what is our total sum of probabilities.
UpperCAmelCase_ : Optional[Any] = sum(single_char_strings.values() )
# one length string
UpperCAmelCase_ : Optional[int] = 0
    # for each character of the alphabet, accumulate its entropy contribution if it occurs
for ch in my_alphas:
if ch in single_char_strings:
UpperCAmelCase_ : Dict = single_char_strings[ch]
UpperCAmelCase_ : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(__lowerCamelCase ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
UpperCAmelCase_ : int = sum(two_char_strings.values() )
UpperCAmelCase_ : Tuple = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            UpperCAmelCase_ : Tuple = cha + chb
if sequence in two_char_strings:
UpperCAmelCase_ : Dict = two_char_strings[sequence]
UpperCAmelCase_ : Optional[int] = int(__lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(__lowerCamelCase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = Counter() # type: ignore
UpperCAmelCase_ : List[str] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(__lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __a ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 61 |
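The first-order entropy computed above is H = -sum(p(c) * log2 p(c)) over the character frequencies of the text. A tiny self-contained check:

import math
from collections import Counter

text = "abab"
counts = Counter(text)                     # {'a': 2, 'b': 2}
total = sum(counts.values())
h1 = -sum((n / total) * math.log2(n / total) for n in counts.values())
print(round(h1, 1))  # 1.0 -- one bit per symbol for a uniform 2-symbol source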
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = RoFormerTokenizer
UpperCamelCase = RoFormerTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowerCamelCase ( self :List[str] ):
super().setUp()
def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ):
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ):
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Any ):
A = "永和服装饰品有限公司,今天天气非常好"
A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def lowerCamelCase ( self :int ):
A = self.get_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :str ):
A = self.get_rust_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :Any ):
pass
def lowerCamelCase ( self :Tuple ):
pass
def lowerCamelCase ( self :List[str] ):
pass
| 292 | 0 |
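A sketch of the behaviour the tests above assert (it needs rjieba installed and network access to fetch the checkpoint):

from transformers import RoFormerTokenizer

tok = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
print(tok.tokenize("永和服装饰品有限公司,今天天气非常好"))
# ['永和', '服装', '饰品', '有限公司', ',', '今', '天', '天', '气', '非常', '好']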
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = DebertaVaTokenizer
UpperCAmelCase__ : Tuple = DebertaVaTokenizerFast
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Optional[Any] = True
def _a ( self ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase =DebertaVaTokenizer(A_ , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self , A_ ) -> Dict:
__UpperCamelCase ='this is a test'
__UpperCamelCase ='this is a test'
return input_text, output_text
def _a ( self ) -> Tuple:
__UpperCamelCase ='<pad>'
__UpperCamelCase =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(A_ ) , 30001 )
def _a ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _a ( self ) -> List[Any]:
# fmt: off
__UpperCamelCase =' \tHeLLo!how \n Are yoU? '
__UpperCamelCase =['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _a ( self ) -> Optional[int]:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _a ( self ) -> str:
pass
def _a ( self ) -> Dict:
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Any:
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Union[str, Any]:
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
# fmt: off
__UpperCamelCase =' \tHeLLo!how \n Are yoU? '
__UpperCamelCase =['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_rust_tokenizer()
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =self.get_rust_tokenizer()
__UpperCamelCase =tokenizer.encode(A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase ='This is a test'
__UpperCamelCase =[13, 1, 4398, 25, 21, 1289]
__UpperCamelCase =['▁', 'T', 'his', '▁is', '▁a', '▁test']
__UpperCamelCase =['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
__UpperCamelCase =DebertaVaTokenizer(A_ , keep_accents=A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , keep_accents=A_ )
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =[13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
__UpperCamelCase =['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
__UpperCamelCase =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =DebertaVaTokenizer(A_ )
__UpperCamelCase =tokenizer.encode('sequence builders' )
__UpperCamelCase =tokenizer.encode('multi-sequence build' )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , A_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , A_ , )
@slow
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase ={'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 62 |
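The build_inputs_with_special_tokens assertions above reduce to the layout below (the ids are illustrative placeholders, not real vocabulary ids):

cls, sep = 1, 2                      # hypothetical [CLS] / [SEP] ids
text_a, text_b = [10, 11], [20, 21]  # hypothetical token ids

single = [cls] + text_a + [sep]
pair = [cls] + text_a + [sep] + text_b + [sep]
print(single)  # [1, 10, 11, 2]
print(pair)    # [1, 10, 11, 2, 20, 21, 2]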
"""simple docstring"""
def A__ ( UpperCamelCase , UpperCamelCase = False ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
A = F"Expected string as input, found {type(UpperCamelCase )}"
raise ValueError(UpperCamelCase )
if not isinstance(UpperCamelCase , UpperCamelCase ):
A = F"Expected boolean as use_pascal parameter, found {type(UpperCamelCase )}"
raise ValueError(UpperCamelCase )
A = input_str.split("_" )
A = 0 if use_pascal else 1
A = words[start_index:]
A = [word[0].upper() + word[1:] for word in words_to_capitalize]
A = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 292 | 0 |
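A readable sketch of the same snake_case-to-camelCase/PascalCase conversion, with illustrative names:

def snake_to_camel(s: str, use_pascal: bool = False) -> str:
    # Split on "_", capitalise every word after the first (camelCase)
    # or every word including the first (PascalCase), then re-join.
    words = s.split("_")
    start = 0 if use_pascal else 1
    head = "" if use_pascal else words[0]
    return head + "".join(w[0].upper() + w[1:] for w in words[start:])

print(snake_to_camel("some_random_string"))        # someRandomString
print(snake_to_camel("some_random_string", True))  # SomeRandomString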
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : str = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase_ : List[Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
lowerCAmelCase_ : str = {'facebook/blenderbot_small-90M': 5_12}
def _lowerCamelCase ( lowercase : List[Any] ) -> List[str]:
_a = set()
_a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_a = char
_a = set(lowercase )
return pairs
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =['input_ids', 'attention_mask']
def __init__( self : List[Any] , __a : List[Any] , __a : List[Any] , __a : Optional[int]="__start__" , __a : Union[str, Any]="__end__" , __a : Any="__unk__" , __a : Union[str, Any]="__null__" , **__a : Tuple , ):
super().__init__(unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , **__a )
with open(__a , encoding="utf-8" ) as vocab_handle:
_a = json.load(__a )
_a = {v: k for k, v in self.encoder.items()}
with open(__a , encoding="utf-8" ) as merges_handle:
_a = merges_handle.read().split("\n" )[1:-1]
_a = [tuple(merge.split() ) for merge in merges]
_a = dict(zip(__a , range(len(__a ) ) ) )
_a = {}
@property
def UpperCamelCase__ ( self : Any ):
return len(self.encoder )
def UpperCamelCase__ ( self : Dict ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : Tuple , __a : str ):
if token in self.cache:
return self.cache[token]
_a = re.sub("([.,!?()])" , r" \1" , __a )
_a = re.sub("(')" , r" \1 " , __a )
_a = re.sub(r"\s{2,}" , " " , __a )
if "\n" in token:
_a = token.replace("\n" , " __newln__" )
_a = token.split(" " )
_a = []
for token in tokens:
if not len(__a ):
continue
_a = token.lower()
_a = tuple(__a )
_a = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
_a = get_pairs(__a )
if not pairs:
words.append(__a )
continue
while True:
_a = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_a , _a = bigram
_a = []
_a = 0
while i < len(__a ):
try:
_a = word.index(__a , __a )
new_word.extend(word[i:j] )
_a = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_a = tuple(__a )
_a = new_word
if len(__a ) == 1:
break
else:
_a = get_pairs(__a )
_a = "@@ ".join(__a )
_a = word[:-4]
_a = word
words.append(__a )
return " ".join(__a )
def UpperCamelCase__ ( self : Union[str, Any] , __a : str ):
_a = []
_a = re.findall(r"\S+\n?" , __a )
for token in words:
split_tokens.extend(list(self.bpe(__a ).split(" " ) ) )
return split_tokens
def UpperCamelCase__ ( self : Optional[int] , __a : str ):
_a = token.lower()
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : Optional[int] , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : Tuple , __a : List[str] ):
_a = " ".join(__a ).replace("@@ " , "" ).strip()
return out_string
def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : Optional[str] = None ):
if not os.path.isdir(__a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + "\n" )
_a = 0
with open(__a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
_a = token_index
writer.write(" ".join(__a ) + "\n" )
index += 1
return vocab_file, merge_file
| 63 |
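The bpe method above is a standard byte-pair-encoding merge loop: it repeatedly fuses the adjacent symbol pair with the best (lowest) merge rank until no known pair remains. A minimal sketch on a toy merge table:

def bpe_merge(word: tuple, ranks: dict) -> tuple:
    # Mirrors the while-loop above: pick the best-ranked adjacent pair,
    # merge every occurrence of it, and repeat.
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

ranks = {("l", "o"): 0, ("lo", "w"): 1}   # toy merge table
print(bpe_merge(("l", "o", "w"), ranks))  # ('low',)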
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ):
A = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ):
super().__init__()
self.register_modules(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ):
if latents is None:
A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
A = latents.to(__UpperCamelCase )
A = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
A = torch.device(f"cuda:{gpu_id}" )
A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
A = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A = None
for cpu_offloaded_model in [self.unet, self.movq]:
A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase ( self :str ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ):
A = self._execution_device
A = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
A = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = hint.repeat_interleave(__UpperCamelCase , dim=0 )
A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
A = self.scheduler.timesteps
A = self.movq.config.latent_channels
A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
# create initial latent
A = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = {"image_embeds": image_embeds, "hint": hint}
A = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
A, A = noise_pred.split(latents.shape[1] , dim=1 )
A, A = noise_pred.chunk(2 )
A, A = variance_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A, A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
A = image * 0.5 + 0.5
A = image.clamp(0 , 1 )
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
| 292 | 0 |
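The denoising loop above applies classifier-free guidance: the UNet runs on a doubled batch (unconditional + conditional) and the two predictions are extrapolated as eps_uncond + s * (eps_text - eps_uncond). A minimal tensor sketch:

import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 4, 64, 64)         # stand-in for a doubled-batch UNet output
noise_uncond, noise_text = noise_pred.chunk(2)
guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)
print(guided.shape)  # torch.Size([1, 4, 64, 64])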
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
A_ = logging.get_logger(__name__)
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["input_values", "attention_mask"]
def __init__( self: Union[str, Any], a_: int = 1, a_: int = 16_000, a_: float = 0.0, a_: bool = False, a_: int = 80, a_: int = 16, a_: int = 64, a_: str = "hann_window", a_: float = 1.0, a_: float = 80, a_: float = 7_600, a_: float = 1E-10, a_: int = 2, a_: bool = True, **a_: Optional[Any], ):
'''simple docstring'''
super().__init__(feature_size=a_, sampling_rate=a_, padding_value=a_, **a_ )
_snake_case : Any = do_normalize
_snake_case : Optional[int] = return_attention_mask
_snake_case : Optional[Any] = num_mel_bins
_snake_case : Optional[Any] = hop_length
_snake_case : List[Any] = win_length
_snake_case : int = win_function
_snake_case : Dict = frame_signal_scale
_snake_case : Union[str, Any] = fmin
_snake_case : Union[str, Any] = fmax
_snake_case : Optional[Any] = mel_floor
_snake_case : Optional[int] = reduction_factor
_snake_case : List[str] = win_length * sampling_rate // 1_000
_snake_case : str = hop_length * sampling_rate // 1_000
_snake_case : Optional[int] = optimal_fft_length(self.sample_size )
_snake_case : List[str] = (self.n_fft // 2) + 1
_snake_case : int = window_function(window_length=self.sample_size, name=self.win_function, periodic=a_ )
_snake_case : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="""slaney""", mel_scale="""slaney""", )
if frame_signal_scale != 1.0:
warnings.warn(
"""The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""", a_, )
if reduction_factor != 2.0:
warnings.warn(
"""The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""", a_, )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCamelCase_ ( a_: List[np.ndarray], a_: List[np.ndarray], a_: float = 0.0 ):
'''simple docstring'''
if attention_mask is not None:
_snake_case : Union[str, Any] = np.array(a_, np.intaa )
_snake_case : Optional[int] = []
for vector, length in zip(a_, attention_mask.sum(-1 ) ):
_snake_case : str = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_snake_case : Union[str, Any] = padding_value
normed_input_values.append(a_ )
else:
_snake_case : List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def UpperCamelCase_ ( self: str, a_: np.ndarray, ):
'''simple docstring'''
_snake_case : Tuple = spectrogram(
a_, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="""log10""", )
return log_mel_spec.T
def __call__( self: Tuple, a_: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, a_: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, a_: Union[bool, str, PaddingStrategy] = False, a_: Optional[int] = None, a_: bool = False, a_: Optional[int] = None, a_: Optional[bool] = None, a_: Optional[Union[str, TensorType]] = None, a_: Optional[int] = None, **a_: Any, ):
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if audio is not None:
_snake_case : Optional[Any] = self._process_audio(
a_, a_, a_, a_, a_, a_, a_, a_, **a_, )
else:
_snake_case : Optional[int] = None
if audio_target is not None:
_snake_case : List[Any] = self._process_audio(
a_, a_, a_, a_, a_, a_, a_, a_, **a_, )
if inputs is None:
return inputs_target
else:
_snake_case : Tuple = inputs_target["""input_values"""]
_snake_case : Any = inputs_target.get("""attention_mask""" )
if decoder_attention_mask is not None:
_snake_case : Optional[Any] = decoder_attention_mask
return inputs
def UpperCamelCase_ ( self: Dict, a_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], a_: bool = False, a_: Union[bool, str, PaddingStrategy] = False, a_: Optional[int] = None, a_: bool = False, a_: Optional[int] = None, a_: Optional[bool] = None, a_: Optional[Union[str, TensorType]] = None, **a_: List[str], ):
'''simple docstring'''
_snake_case : Any = isinstance(a_, np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
_snake_case : Optional[int] = is_batched_numpy or (
isinstance(a_, (list, tuple) ) and (isinstance(speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
_snake_case : Tuple = [np.asarray(a_, dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(a_, np.ndarray ):
_snake_case : Union[str, Any] = np.asarray(a_, dtype=np.floataa )
elif isinstance(a_, np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_snake_case : Union[str, Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
_snake_case : Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
_snake_case : Any = self.feature_size
# convert into correct format for padding
if is_target:
_snake_case : str = [self._extract_mel_features(a_ ) for waveform in speech]
_snake_case : Dict = BatchFeature({"""input_values""": features} )
_snake_case : Tuple = self.num_mel_bins
else:
_snake_case : Tuple = BatchFeature({"""input_values""": speech} )
_snake_case : Union[str, Any] = self.pad(
a_, padding=a_, max_length=a_, truncation=a_, pad_to_multiple_of=a_, return_attention_mask=a_, **a_, )
_snake_case : int = feature_size_hack
# convert input values to correct format
_snake_case : List[str] = padded_inputs["""input_values"""]
if not isinstance(input_values[0], np.ndarray ):
_snake_case : Optional[Any] = [np.asarray(a_, dtype=np.floataa ) for array in input_values]
elif (
not isinstance(a_, np.ndarray )
and isinstance(input_values[0], np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_snake_case : str = [array.astype(np.floataa ) for array in input_values]
elif isinstance(a_, np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_snake_case : Optional[Any] = input_values.astype(np.floataa )
# convert attention_mask to correct format
_snake_case : Optional[Any] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
_snake_case : Dict = [np.asarray(a_, dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_snake_case : str = (
attention_mask
if self._get_padding_strategies(a_, max_length=a_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_snake_case : List[str] = self.zero_mean_unit_var_norm(
padded_inputs["""input_values"""], attention_mask=a_, padding_value=self.padding_value )
if return_tensors is not None:
_snake_case : Union[str, Any] = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_snake_case : List[str] = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""]
for name in names:
if name in output:
del output[name]
return output
| 64 |
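The normalisation applied above (in the unpadded case) is plain per-utterance standardisation:

import numpy as np

def zero_mean_unit_var(x: np.ndarray) -> np.ndarray:
    # Subtract the mean, divide by the standard deviation; the small
    # epsilon keeps the division stable for near-silent inputs.
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

wav = np.array([0.0, 0.5, 1.0], dtype=np.float32)
normed = zero_mean_unit_var(wav)
print(round(float(normed.mean()), 6), round(float(normed.var()), 3))  # ~0.0 ~1.0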
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A = (image_size // patch_size) ** 2
A = num_patches + 1
def lowerCamelCase ( self :Any ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self :Union[str, Any] ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ):
A = ViTMSNModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ):
A = self.type_sequence_label_size
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , labels=__UpperCamelCase )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase ( self :Optional[Any] ):
A = self.prepare_config_and_inputs()
A, A, A = config_and_inputs
A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :Optional[int] ):
A = ViTMSNModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def lowerCamelCase ( self :Union[str, Any] ):
pass
def lowerCamelCase ( self :int ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCamelCase ( self :Tuple ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase ( self :List[Any] ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ViTMSNModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A__ ( ):
A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self :Union[str, Any] ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self :Any ):
torch.manual_seed(2 )
A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
# verify the logits
A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 292 | 0 |
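The sequence length set up in the model tester above follows the ViT patching rule: non-overlapping patches plus one [CLS] token:

image_size, patch_size = 30, 2                # the tester's defaults
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1                  # +1 for the [CLS] token
print(num_patches, seq_length)                # 225 226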
def lowerCAmelCase_ ( __A, __A, __A, __A ) -> int:
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ = len(__A ), len(grid[0] )
if (
min(__A, __A ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
UpperCAmelCase__ = 0
count += depth_first_search(__A, row + 1, __A, __A )
count += depth_first_search(__A, row - 1, __A, __A )
count += depth_first_search(__A, __A, col + 1, __A )
count += depth_first_search(__A, __A, col - 1, __A )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
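A readable sketch of the backtracking path counter above, with illustrative names; it counts simple paths from the top-left to the bottom-right cell, treating 1 as an obstacle:

def count_paths(grid, row=0, col=0, visit=None):
    # Explore all four directions, marking cells in `visit` on the way
    # down and unmarking them on the way back (backtracking).
    if visit is None:
        visit = set()
    rows, cols = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == rows
        or col == cols
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == rows - 1 and col == cols - 1:
        return 1
    visit.add((row, col))
    total = sum(
        count_paths(grid, r, c, visit)
        for r, c in ((row + 1, col), (row - 1, col), (row, col + 1), (row, col - 1))
    )
    visit.remove((row, col))
    return total

grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
print(count_paths(grid))  # 2 simple paths around the blocked centre cell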
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''vivit'''
def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = num_frames
A = tubelet_size
A = num_channels
A = qkv_bias
super().__init__(**__UpperCamelCase )
| 292 | 0 |
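A hedged note on the defaults above: assuming ViViT's usual tubelet embedding, the video is split into non-overlapping tubelets of size (t, h, w), so these defaults would imply the token count below:

num_frames, image_size = 32, 224   # config defaults above
t, h, w = 2, 16, 16                # tubelet_size default
num_tokens = (num_frames // t) * (image_size // h) * (image_size // w)
print(num_tokens)  # 3136 (the model adds a [CLS] token on top)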
"""simple docstring"""
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def A_ ( _lowercase = 5000 ):
'''simple docstring'''
snake_case_ :Any = [(i * (3 * i - 1)) // 2 for i in range(1, _lowercase )]
for i, pentagonal_i in enumerate(_lowercase ):
for j in range(_lowercase, len(_lowercase ) ):
snake_case_ :List[str] = pentagonal_nums[j]
snake_case_ :Dict = pentagonal_i + pentagonal_j
snake_case_ :str = pentagonal_j - pentagonal_i
if is_pentagonal(_lowercase ) and is_pentagonal(_lowercase ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 66 |
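The check above inverts the pentagonal formula P_k = k(3k - 1)/2: solving for k gives k = (1 + sqrt(1 + 24P))/6, so P is pentagonal exactly when that value is a whole number:

def is_pentagonal(p: int) -> bool:
    # k = (1 + sqrt(1 + 24 * p)) / 6 must be a positive integer.
    root = (1 + 24 * p) ** 0.5
    return ((1 + root) / 6) % 1 == 0

print([p for p in range(1, 60) if is_pentagonal(p)])  # [1, 5, 12, 22, 35, 51]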
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
A = np.random.RandomState(__UpperCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :Any ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Union[str, Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
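# --- Illustrative sketch (not part of the test suite above) -----------------
# The pattern these tests exercise is loading the ONNX img2img pipeline once
# and swapping schedulers via `SchedulerClass.from_config(pipe.scheduler.config)`
# so the timestep configuration carries over. The checkpoint id below is the
# one the default-PNDM test uses; any ONNX-exported Stable Diffusion
# checkpoint would do.
def _example_scheduler_swap():
    pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
    )
    # Reuse the current scheduler's config when swapping implementations.
    pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe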
| 292 | 0 |
'''simple docstring'''
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
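# Worked example for the function restored above (illustrative only): the
# proper divisors of a perfect number sum back to the number itself.
if __name__ == "__main__":
    print(sum_of_divisors(6))   # 1 + 2 + 3 = 6
    print(sum_of_divisors(28))  # 1 + 2 + 4 + 7 + 14 = 28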
| 67 |
"""simple docstring"""
def A__ ( UpperCamelCase ):
A = generate_pascal_triangle(UpperCamelCase )
for row_idx in range(UpperCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = []
for current_row_idx in range(UpperCamelCase ):
A = populate_current_row(UpperCamelCase , UpperCamelCase )
triangle.append(UpperCamelCase )
return triangle
def A__ ( UpperCamelCase , UpperCamelCase ):
A = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A, A = 1, 1
for current_col_idx in range(1 , UpperCamelCase ):
calculate_current_element(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return current_row
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
A = triangle[current_row_idx - 1][current_col_idx - 1]
A = triangle[current_row_idx - 1][current_col_idx]
A = above_to_left_elt + above_to_right_elt
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = [[1]]
for row_index in range(1 , UpperCamelCase ):
A = [0] + result[-1] + [0]
A = row_index + 1
# Calculate the number of distinct elements in a row
A = sum(divmod(UpperCamelCase , 2 ) )
A = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A = row_first_half + row_second_half
result.append(UpperCamelCase )
return result
def A__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None:
A = F"{func.__name__}({value})"
A = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCamelCase , UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 292 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class a__ ( unittest.TestCase ):
"""simple docstring"""
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this only evaluates a small batch, but it is enough to detect
        # a regression in output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
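# --- Illustrative sketch of the scoring step in isolation --------------------
# `calculate_bleu` is imported from the local `utils` module above; in the
# transformers examples it wraps sacrebleu's corpus BLEU and returns a dict
# like {"bleu": <float>} — treat that return shape as an assumption here.
def _example_bleu_scoring():
    hypotheses = ["the cat sits on the mat"]
    references = ["the cat sat on the mat"]
    scores = calculate_bleu(hypotheses, references)
    print(scores["bleu"])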
| 68 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as raw bytes and return its content as a bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress an LZW bit string, rebuilding the lexicon on the fly."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # Code width grows by one bit: prefix every existing key with "0".
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack a bit string into bytes (with end padding) and write it out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that a compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read a compressed file, decompress it and write the result out."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
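# --- In-memory illustration of the pure decompression core (illustrative) ---
# Independent of any file I/O: `decompress_data` replays the LZW dictionary
# growth bit by bit, starting from the seed lexicon {"0": "0", "1": "1"}.
# The bit string below is an arbitrary small payload chosen for demonstration.
def _example_decompress():
    print(decompress_data("100"))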
| 292 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''RUCAIBox/mvp''': 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> List[str]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
    @property
    def mask_token(self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
    def mask_token(self, value) -> None:
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> Tuple:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
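# --- Usage sketch for the fast tokenizer defined above (illustrative) --------
# The checkpoint id comes from the pretrained maps in this file; loading it
# requires network access, so this is for demonstration only.
if __name__ == "__main__":
    tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    print(tok("Hello world")["input_ids"])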
| 69 |
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
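# --- Extraction-order check for the heap above (illustrative) ----------------
# Repeatedly removing the root of a fresh MinHeap must yield the nodes in
# nondecreasing `val` order: -1, 1, 3, 4, 6 for the sample nodes used here.
if __name__ == "__main__":
    demo_heap = MinHeap([Node("R", -1), Node("B", 6), Node("A", 3), Node("X", 1), Node("E", 4)])
    while not demo_heap.is_empty():
        print(demo_heap.remove())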
| 292 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def lowercase__ ( self : Optional[int] ) -> str:
super().setUp()
_lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowerCAmelCase = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
_lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase = {"""unk_token""": """<unk>"""}
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__snake_case ) )
def lowercase__ ( self : List[Any] , **__snake_case : str ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase__ ( self : int , **__snake_case : Optional[int] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase__ ( self : List[Any] , __snake_case : Optional[Any] ) -> Tuple:
return "lower newer", "lower newer"
@cached_property
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowercase__ ( self : str ) -> Optional[int]:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowercase__ ( self : str ) -> int:
_lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowerCAmelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors="""pt""" )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def lowercase__ ( self : Tuple ) -> Any:
_lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(__snake_case , padding=__snake_case , return_tensors="""pt""" )
self.assertIn("""input_ids""" , __snake_case )
self.assertIn("""attention_mask""" , __snake_case )
self.assertNotIn("""labels""" , __snake_case )
self.assertNotIn("""decoder_attention_mask""" , __snake_case )
@require_torch
def lowercase__ ( self : Optional[int] ) -> List[Any]:
_lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(text_target=__snake_case , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowercase__ ( self : Tuple ) -> Dict:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] , padding=__snake_case , truncation=__snake_case , return_tensors="""pt""" )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def lowercase__ ( self : str ) -> Any:
_lowerCAmelCase = ["""A long paragraph for summarization."""]
_lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(__snake_case , return_tensors="""pt""" )
_lowerCAmelCase = tokenizer(text_target=__snake_case , return_tensors="""pt""" )
_lowerCAmelCase = inputs["""input_ids"""]
_lowerCAmelCase = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowercase__ ( self : str ) -> Any:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = ["""Summary of the text.""", """Another summary."""]
_lowerCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowerCAmelCase = tokenizer(__snake_case , padding=__snake_case )
_lowerCAmelCase = [[0] * len(__snake_case ) for x in encoded_output["""input_ids"""]]
_lowerCAmelCase = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , __snake_case )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
pass
def lowercase__ ( self : int ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
_lowerCAmelCase = """A, <mask> AllenNLP sentence."""
_lowerCAmelCase = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
_lowerCAmelCase = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
_lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__snake_case , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__snake_case , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 70 |
"""simple docstring"""
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check that no already-placed queen attacks (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
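# --- Smaller sanity check (illustrative) --------------------------------------
# The 4-queens puzzle is known to have exactly 2 solutions, so the run below
# should report 2. Note that `solve` appends the live (mutable) board object,
# so `solution` is most useful here as a counter.
small_board = [[0 for _ in range(4)] for _ in range(4)]
found_before = len(solution)
solve(small_board, 0)
print('4-queens solutions found:', len(solution) - found_before)  # expected: 2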
| 292 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
"""simple docstring"""
UpperCamelCase__ : Optional[str] =field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
UpperCamelCase__ : Optional[str] =field(
default=a , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase__ : Optional[str] =field(
default=a , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , )
UpperCamelCase__ : Optional[str] =field(default=a , metadata={"""help""": """A folder containing the training data."""} )
UpperCamelCase__ : Optional[str] =field(default=a , metadata={"""help""": """A folder containing the validation data."""} )
UpperCamelCase__ : Optional[float] =field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
UpperCamelCase__ : int =field(default=3_2 , metadata={"""help""": """The size of the square patches to use for masking."""} )
UpperCamelCase__ : float =field(
default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , )
UpperCamelCase__ : Optional[int] =field(
default=a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase__ : Optional[int] =field(
default=a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int ={}
if self.train_dir is not None:
__UpperCamelCase : Dict =self.train_dir
if self.validation_dir is not None:
__UpperCamelCase : Any =self.validation_dir
__UpperCamelCase : Dict =data_files if data_files else None
@dataclass
class ModelArguments:
"""simple docstring"""
UpperCamelCase__ : str =field(
default=a , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """
"""checkpoint identifier on the hub. """
"""Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase__ : Optional[str] =field(
default=a , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(a )} , )
UpperCamelCase__ : Optional[str] =field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase__ : Optional[str] =field(
default=a , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase__ : Optional[str] =field(
default=a , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , )
UpperCamelCase__ : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase__ : str =field(default=a , metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCamelCase__ : bool =field(
default=a , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCamelCase__ : Optional[int] =field(
default=a , metadata={
"""help""": (
"""The size (resolution) of each image. If not specified, will use `image_size` of the configuration."""
)
} , )
UpperCamelCase__ : Optional[int] =field(
default=a , metadata={
"""help""": (
"""The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."""
)
} , )
UpperCamelCase__ : Optional[int] =field(
default=a , metadata={"""help""": """Stride to use for the encoder."""} , )
class MaskGenerator:
    """Generates SimMIM-style random block masks over the model-patch grid."""
def __init__( self , lowerCamelCase__=192 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=0.6 ):
"""simple docstring"""
__UpperCamelCase : int =input_size
__UpperCamelCase : Any =mask_patch_size
__UpperCamelCase : List[Any] =model_patch_size
__UpperCamelCase : Union[str, Any] =mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
__UpperCamelCase : Any =self.input_size // self.mask_patch_size
__UpperCamelCase : Dict =self.mask_patch_size // self.model_patch_size
__UpperCamelCase : List[Any] =self.rand_size**2
__UpperCamelCase : List[Any] =int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
"""simple docstring"""
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCamelCase : str =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Tuple =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Dict =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' ,a_ ,a_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCamelCase : Optional[Any] =training_args.get_process_log_level()
logger.setLevel(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__UpperCamelCase : Any =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCamelCase : int =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
__UpperCamelCase : Union[str, Any] =load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,data_files=data_args.data_files ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
# If we don't have a validation split, split off a percentage of train as validation.
__UpperCamelCase : int =None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,a_ ) and data_args.train_val_split > 0.0:
__UpperCamelCase : int =ds['train'].train_test_split(data_args.train_val_split )
__UpperCamelCase : Optional[Any] =split['train']
__UpperCamelCase : List[str] =split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase : Dict ={
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__UpperCamelCase : Tuple =AutoConfig.from_pretrained(model_args.config_name_or_path ,**a_ )
elif model_args.model_name_or_path:
__UpperCamelCase : List[str] =AutoConfig.from_pretrained(model_args.model_name_or_path ,**a_ )
else:
__UpperCamelCase : Any =CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(a_ ,'decoder_type' ):
__UpperCamelCase : List[str] ='simmim'
# adapt config
__UpperCamelCase : Dict =model_args.image_size if model_args.image_size is not None else config.image_size
__UpperCamelCase : Optional[Any] =model_args.patch_size if model_args.patch_size is not None else config.patch_size
__UpperCamelCase : int =(
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__UpperCamelCase : Optional[Any] =AutoImageProcessor.from_pretrained(model_args.image_processor_name ,**a_ )
elif model_args.model_name_or_path:
__UpperCamelCase : Tuple =AutoImageProcessor.from_pretrained(model_args.model_name_or_path ,**a_ )
else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('Training new model from scratch' )
        model = AutoModelForMaskedImageModeling.from_config(config)
if training_args.do_train:
__UpperCamelCase : Union[str, Any] =ds['train'].column_names
else:
__UpperCamelCase : Dict =ds['validation'].column_names
if data_args.image_column_name is not None:
__UpperCamelCase : Optional[Any] =data_args.image_column_name
elif "image" in column_names:
__UpperCamelCase : List[Any] ='image'
elif "img" in column_names:
__UpperCamelCase : Optional[int] ='img'
else:
__UpperCamelCase : List[Any] =column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
[
            Lambda(lambda img: img.convert('RGB') if img.mode != "RGB" else img),
RandomResizedCrop(model_args.image_size ,scale=(0.67, 1.0) ,ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean ,std=image_processor.image_std ),
] )
# create mask generator
    mask_generator = MaskGenerator(
input_size=model_args.image_size ,mask_patch_size=data_args.mask_patch_size ,model_patch_size=model_args.patch_size ,mask_ratio=data_args.mask_ratio ,)
def preprocess_images(a_ ):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__UpperCamelCase : Optional[int] =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(a_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__UpperCamelCase : int =(
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(a_ )
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds['train'] if training_args.do_train else None, eval_dataset=ds['validation'] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
__UpperCamelCase : Optional[Any] =None
if training_args.resume_from_checkpoint is not None:
__UpperCamelCase : List[str] =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCamelCase : List[Any] =last_checkpoint
__UpperCamelCase : List[str] =trainer.train(resume_from_checkpoint=a_ )
trainer.save_model()
trainer.log_metrics('train' ,train_result.metrics )
trainer.save_metrics('train' ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__UpperCamelCase : Optional[int] =trainer.evaluate()
trainer.log_metrics('eval' ,a_ )
trainer.save_metrics('eval' ,a_ )
# Write model card and (optionally) push to hub
__UpperCamelCase : Tuple ={
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
if __name__ == "__main__":
main()
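# --- Standalone illustration of MaskGenerator (not part of the script) -------
# With the SimMIM defaults above (192px input, 32px mask patches, 4px model
# patches, 60% mask ratio) each call yields a flat 0/1 tensor over
# (192 / 4)**2 = 2304 model-patch positions, roughly 60% of them set.
def _example_mask_generator():
    gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = gen()
    print(mask.shape, mask.sum().item())  # torch.Size([2304]) 1408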
| 71 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
import datasets
A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = "Intel/dpt-large"
A = pipeline("depth-estimation" , model=__UpperCamelCase )
A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
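# --- Minimal end-user sketch of the pipeline exercised above -----------------
# Model id taken from the slow test; downloading the weights is required, so
# this is illustrative only.
def _example_depth_estimation():
    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    out["depth"].save("depth.png")       # PIL image of the predicted depth map
    print(out["predicted_depth"].shape)  # raw torch.Tensor of depth values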
| 292 | 0 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = passable) between two cells."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
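# --- Tiny worked example (illustrative) ---------------------------------------
# On a 3x3 grid of passable cells (1 = free) the shortest 4-connected path
# from (0, 0) to (2, 2) takes 4 unit steps.
if __name__ == "__main__":
    demo_grid = np.ones((3, 3), dtype=int)
    dist, path = dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False)
    print(dist)  # 4.0
    print(path)  # e.g. [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2)]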
| 72 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = eos_token_id
A = pad_token_id
A = bos_token_id
def lowerCamelCase ( self :Tuple ):
A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A = tf.concat([input_ids, eos_tensor] , axis=1 )
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ):
A = TFPegasusModel(config=__UpperCamelCase ).get_decoder()
A = inputs_dict["input_ids"]
A = input_ids[:1, :]
A = inputs_dict["attention_mask"][:1, :]
A = inputs_dict["head_mask"]
A = 1
# first forward pass
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
A, A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
A = tf.concat([input_ids, next_tokens] , axis=-1 )
A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A = output_from_no_past[:, -3:, random_slice_idx]
A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ):
if attention_mask is None:
        A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
A = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
def lowerCamelCase ( self :int ):
A = TFPegasusModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self :Any ):
A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
@cached_property
def lowerCamelCase ( self :Any ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase ( self :Dict ):
        A = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase ( self :str , **__UpperCamelCase :str ):
A = self.translate_src_text(**__UpperCamelCase )
assert self.expected_text == generated_words
def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ):
A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" )
A = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , )
A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase )
return generated_words
@slow
def lowerCamelCase ( self :Union[str, Any] ):
self._assert_generated_batch_equal_expected()
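# --- End-user sketch of the generation path these tests cover ----------------
# Requires downloading the pegasus-xsum weights, so illustrative only.
def _example_pegasus_summarize():
    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(
        ["PG&E scheduled the blackouts in response to forecasts for high winds."],
        return_tensors="tf",
        padding=True,
    )
    ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True))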
| 292 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
a ="""\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
a ="""\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
a ="""
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' if self.config_name != 'cvit-mkb-clsr' else None ,)
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
| 73 |
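For intuition, the precision@10 metric above is nearest-neighbour retrieval accuracy over cosine distances: sentence i in one language should retrieve sentence i in the other among its 10 closest vectors. A minimal self-contained sketch of the same computation on toy vectors (illustrative data, not part of the metric code):

import numpy as np
from scipy.spatial.distance import cdist

en = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
other = en + 0.01  # pretend each translation lands right next to its source
sim = cdist(en - en.mean(axis=0), other - other.mean(axis=0), "cosine")
top10 = sim.argsort(axis=1)[:, :10]
print(float(np.any(top10 == np.arange(len(en))[:, None], axis=1).mean()))  # 1.0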
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
A = F"https://www.amazon.in/laptop/s?k={product}"
A = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
A = item.ha.text
A = "https://www.amazon.in/" + item.ha.a["href"]
A = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
A = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
A = "Not available"
try:
A = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A = ""
try:
A = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
A = float("nan" )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = " "
A = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
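As a quick sanity check of the discount arithmetic in the scraper above: for an MRP of ₹1,000 and a current price of ₹750, the computation is (1000 - 750) / 1000 * 100 = 25.0 percent. The parsing step in isolation, on toy strings mirroring Amazon's price format:

product_mrp = "₹1,000"
product_price = "₹750"
mrp = float(product_mrp.strip("₹").replace(",", ""))
price = float(product_price.strip("₹").replace(",", ""))
print((mrp - price) / mrp * 100)  # 25.0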
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 74 |
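The heart of the arguments class above is mapping available hardware to a tf.distribute strategy: TPUStrategy when a TPU resolver is found, otherwise a OneDeviceStrategy pinned to a single GPU or CPU. A standalone sketch of just the GPU/CPU branch (assumes TensorFlow is installed; the TPU path is omitted here):

import tensorflow as tf

def pick_strategy(device_idx: int = 0) -> tf.distribute.Strategy:
    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        # pin a single visible GPU, as the benchmark arguments do
        tf.config.set_visible_devices(gpus[device_idx], "GPU")
        return tf.distribute.OneDeviceStrategy(device=f"/gpu:{device_idx}")
    tf.config.set_visible_devices([], "GPU")  # disable GPU entirely
    return tf.distribute.OneDeviceStrategy(device=f"/cpu:{device_idx}")

strategy = pick_strategy()
with strategy.scope():
    pass  # build the benchmarked model here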
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ):
A = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )
A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = len(__UpperCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__UpperCamelCase )}." )
# get prompt text embeddings
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A = text_input_ids[:, : self.tokenizer.model_max_length]
A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A, A, A = text_embeddings.shape
A = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A = 42
if negative_prompt is None:
A = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
f" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A = negative_prompt
A = text_input_ids.shape[-1]
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A = uncond_embeddings.shape[1]
A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A = {}
if accepts_eta:
A = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A, A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = 1 / 0.18_215 * latents
A = self.vae.decode(__UpperCamelCase ).sample
A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
| 292 | 0 |
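The guidance step in the pipeline above follows equation (2) of the Imagen paper: the final noise estimate is noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond), so guidance_scale = 1 collapses to the plain conditional prediction. A minimal tensor-level sketch of that single step on toy shapes (assumes PyTorch):

import torch

guidance_scale = 7.5
noise_pred = torch.randn(2, 4, 64, 64)  # [uncond | text] stacked on the batch axis
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 64, 64])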
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 75 |
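The pattern above defers heavy imports until a public symbol is first touched: _import_structure maps submodule names to their exported names, and the module object in sys.modules is swapped for a lazy proxy. A hypothetical minimal stand-in for that idea (not transformers' actual _LazyModule, just a sketch of the mechanism):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._symbol_to_module = {s: m for m, syms in import_structure.items() for s in syms}

    def __getattr__(self, name):
        # only called when the attribute is not yet set on the module
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        value = getattr(module, name)
        setattr(self, name, value)  # cache so the import happens once
        return value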
"""simple docstring"""
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 76 |
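The guidance loop above nudges each denoising step uphill on a learned value function: compute the gradient of the predicted return with respect to the noisy trajectory, scale it by the model's posterior standard deviation, and add it back. One such update in isolation, with a toy stand-in value function (assumes PyTorch; the quadratic value_fn is purely illustrative):

import torch

def value_fn(x):  # stand-in for the trained value network
    return -(x ** 2).sum()

x = torch.randn(4, 8, requires_grad=True)
scale = 0.1
y = value_fn(x)
grad = torch.autograd.grad([y], [x])[0]
x = x.detach() + scale * grad  # step toward higher predicted return
print(x.shape)  # torch.Size([4, 8])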
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 292 | 0 |
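The tie_embeds branch above is the standard weight-tying trick: the LM head is a bias-free Linear layer whose weight matrix is the decoder's token-embedding matrix, so the output projection and the input embedding share parameters. A minimal sketch on toy sizes (assumes PyTorch; numbers are illustrative):

import torch
from torch import nn

emb = nn.Embedding(100, 16)  # vocab_size x d_model
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight = emb.weight  # share the same Parameter object
hidden = torch.randn(2, 5, emb_size)
print(lm_head(hidden).shape)  # torch.Size([2, 5, 100])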
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_UpperCamelCase : Optional[Any] = "examples/"
_UpperCamelCase : Tuple = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
_UpperCamelCase : Any = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
_UpperCamelCase : Union[str, Any] = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 77 |
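To see one of the REPLACE_PATTERNS above in action: the "init" pattern captures the current version string, and its replacement re-emits the line with a new one. A self-contained example on toy file contents:

import re

pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
code = 'something = 1\n__version__ = "0.19.0.dev0"\n'
print(pattern.search(code).groups()[0])           # 0.19.0.dev0
print(pattern.sub('__version__ = "0.19.0"\n', code))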
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Counts hybrid integers p^q * q^p (p, q distinct primes) not exceeding base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 292 | 0 |
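The two-pointer search works because taking log2 turns the hybrid-integer condition into a linear one: p^q * q^p <= base^degree if and only if q*log2(p) + p*log2(q) <= degree*log2(base). For example with p = 2 and q = 3: 3*log2(2) + 2*log2(3) ≈ 6.17, far below the bound of roughly 1.57e7, so 2^3 * 3^2 = 72 is a hybrid integer. The inequality checked in isolation:

from math import log2

upper_bound = 800_800 * log2(800_800)
p, q = 2, 3
print(q * log2(p) + p * log2(q) <= upper_bound)  # True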
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 78 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 292 | 0 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 79 |
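The core trick above is that a SavedModel protobuf already lists every op it uses, so compatibility checking is pure set arithmetic: collect node.op names from the graph and its library functions, then subtract the known-good sets. A sketch that prints the ops of a freshly exported toy model (assumes a TF2 environment where model.save writes the SavedModel format):

import tempfile
import tensorflow as tf
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])
with tempfile.TemporaryDirectory() as tmp:
    model.save(tmp)
    saved_model = SavedModel()
    with open(f"{tmp}/saved_model.pb", "rb") as f:
        saved_model.ParseFromString(f.read())
    ops = set()
    for meta_graph in saved_model.meta_graphs:
        ops.update(node.op for node in meta_graph.graph_def.node)
        for func in meta_graph.graph_def.library.function:
            ops.update(node.op for node in func.node_def)
    print(sorted(ops))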
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A = {0: "batch", 1: "decoder_sequence"}
A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
else:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(__UpperCamelCase , self ).outputs
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
A = common_inputs["decoder_input_ids"].shape[1]
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A, A = self.num_layers
A = min(__UpperCamelCase , __UpperCamelCase )
A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A = seqlen + 2
A, A = self.num_layers
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs["attention_mask"].dtype
A = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
A = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
A = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 292 | 0 |
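The dummy past_key_values above follow the standard shape convention (batch, num_attention_heads, past_sequence_length, head_dim), with one (key, value) pair per decoder layer. A small worked example with typical Marian dimensions (d_model 1024 and 16 heads, so head_dim 64; the layer count of 6 is chosen arbitrarily for illustration):

import torch

batch, num_heads, past_len, head_dim = 2, 16, 10, 1024 // 16
past_key_values = [
    (torch.zeros(batch, num_heads, past_len, head_dim), torch.zeros(batch, num_heads, past_len, head_dim))
    for _ in range(6)  # one (key, value) pair per decoder layer
]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 10, 64])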
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
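# Worked example of compute_f1 above (illustration only, not part of the official
# script): "the cat sat" normalizes to ["cat", "sat"] since the article is stripped,
# while "cat sat down" normalizes to ["cat", "sat", "down"]. The two shared tokens
# give precision 2/3 and recall 2/2 = 1.0, so
# F1 = 2 * (2/3) * 1.0 / (2/3 + 1.0) = 0.8, i.e.
# compute_f1("the cat sat", "cat sat down") == 0.8.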
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def _UpperCamelCase ( __A , __A , __A , __A ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCamelCase__ = num_no_ans
UpperCamelCase__ = cur_score
UpperCamelCase__ = 0.0
UpperCamelCase__ = sorted(__A , key=lambda __A : na_probs[k] )
for i, qid in enumerate(__A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCamelCase__ = scores[qid]
else:
if preds[qid]:
UpperCamelCase__ = -1
else:
UpperCamelCase__ = 0
cur_score += diff
if cur_score > best_score:
UpperCamelCase__ = cur_score
UpperCamelCase__ = na_probs[qid]
return 100.0 * best_score / len(__A ), best_thresh
def _UpperCamelCase ( __A , __A , __A , __A , __A , __A ) -> Dict:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = find_best_thresh(__A , __A , __A , __A )
UpperCamelCase__ , UpperCamelCase__ = find_best_thresh(__A , __A , __A , __A )
UpperCamelCase__ = best_exact
UpperCamelCase__ = exact_thresh
UpperCamelCase__ = best_fa
UpperCamelCase__ = fa_thresh
def _UpperCamelCase ( ) -> Any:
'''simple docstring'''
with open(OPTS.data_file ) as f:
UpperCamelCase__ = json.load(__A )
UpperCamelCase__ = dataset_json["data"]
with open(OPTS.pred_file ) as f:
UpperCamelCase__ = json.load(__A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCamelCase__ = json.load(__A )
else:
UpperCamelCase__ = {k: 0.0 for k in preds}
UpperCamelCase__ = make_qid_to_has_ans(__A ) # maps qid to True/False
UpperCamelCase__ = [k for k, v in qid_to_has_ans.items() if v]
UpperCamelCase__ = [k for k, v in qid_to_has_ans.items() if not v]
UpperCamelCase__ , UpperCamelCase__ = get_raw_scores(__A , __A )
UpperCamelCase__ = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
UpperCamelCase__ = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
UpperCamelCase__ = make_eval_dict(__A , __A )
if has_ans_qids:
UpperCamelCase__ = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , "HasAns" )
if no_ans_qids:
UpperCamelCase__ = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(__A , __A , __A , __A , __A , __A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__A , __A , __A , __A , __A , OPTS.out_image_dir )
histogram_na_prob(__A , __A , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(__A , __A , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(__A , __A )
else:
print(json.dumps(__A , indent=2 ) )
if __name__ == "__main__":
a__ : Optional[int] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 80 |
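# A minimal sketch of the metrics defined in the script above, assuming it is
# importable as `evaluate_squad` (a hypothetical module name). F1 is token-level
# overlap between normalized answers: precision = overlap / len(pred_tokens),
# recall = overlap / len(gold_tokens).
from evaluate_squad import compute_exact, compute_f1  # hypothetical import path

print(compute_exact("The Eiffel Tower", "eiffel tower"))  # 1: case and articles are normalized away
print(compute_f1("the cat sat", "a cat sat down"))  # 0.8: overlap=2, precision=2/3, recall=1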
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 292 | 0 |
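# A quick sanity check of check_bipartite_dfs above: any odd cycle (here a
# triangle) forces two adjacent vertices into the same color class.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False: a 3-cycle cannot be 2-colored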
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase_ : Optional[Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase_ : List[Any] = []
lowerCamelCase_ : Optional[Any] = []
lowerCamelCase_ : Optional[Any] = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase_ : Dict = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"""emoji""": True,
},
}
]
lowerCamelCase_ : List[Any] = 0
for log in Path().glob("""*.log"""):
lowerCamelCase_ : List[Any] = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase_ : List[Any] = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase_ : Any = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase_ : List[Any] = F'{line["duration"]:.4f}'
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase_ : List[str] = []
log.unlink()
lowerCamelCase_ : List[Any] = """"""
lowerCamelCase_ : List[Any] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase_ : Optional[Any] = []
lowerCamelCase_ : Optional[int] = {}
for test in failed_tests:
lowerCamelCase_ : Union[str, Any] = test[0].split("""::""")
lowerCamelCase_ : Union[str, Any] = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase_ : str = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase_ : Optional[Any] = [test[0] for test in failed_table]
lowerCamelCase_ : str = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase_ : int = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase_ : str = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
lowerCamelCase_ : int = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase_ : str = len(err) + 1_0
lowerCamelCase_ : Optional[Any] = message[: 3_0_0_0 - offset] + F'\n...\n```\n{err}'
print(F'### {message}')
else:
lowerCamelCase_ : Optional[Any] = """No failed tests! 🤗"""
print(F'## {message}')
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase_ : List[Any] = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase_ : Any = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase_ : Dict = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
lowerCamelCase_ : Optional[Any] = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
lowerCamelCase_ : Optional[int] = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase_ : Union[str, Any] = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase_ : int = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase_ : str = row[0]
else:
lowerCamelCase_ : str = """"""
lowerCamelCase_ : Dict = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
) | 81 |
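# A standalone sketch of the custom tabulate format used above: DataRow("", "|", "|")
# with all ruling lines set to None emits plain pipe-delimited rows, which render
# as a compact table inside a Slack code block.
from tabulate import DataRow, TableFormat, tabulate

fmt = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
print(tabulate([["tests/test_foo.py", 3]], headers=["Test Location", "Num Failed"], tablefmt=fmt))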
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ):
A = 1.0 if scale is None else scale
A = 0.0 if loc is None else loc
super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] )
@property
def lowerCamelCase ( self :Any ):
return self.base_dist.mean * self.scale + self.loc
@property
def lowerCamelCase ( self :Optional[int] ):
return self.base_dist.variance * self.scale**2
@property
def lowerCamelCase ( self :Dict ):
return self.variance.sqrt()
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ):
super().__init__(**__UpperCamelCase )
A = args_dim
A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] )
A = domain_map
def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ):
A = [proj(__UpperCamelCase ) for proj in self.proj]
return self.domain_map(*__UpperCamelCase )
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int ):
super().__init__()
A = function
def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ):
return self.function(__UpperCamelCase , *__UpperCamelCase )
class _UpperCAmelCase :
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :Any , __UpperCamelCase :int = 1 ):
A = dim
A = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ):
if self.dim == 1:
return self.distribution_class(*__UpperCamelCase )
else:
return Independent(self.distribution_class(*__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ):
A = self._base_distribution(__UpperCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim )
@property
def lowerCamelCase ( self :List[Any] ):
return () if self.dim == 1 else (self.dim,)
@property
def lowerCamelCase ( self :Tuple ):
return len(self.event_shape )
@property
def lowerCamelCase ( self :int ):
return 0.0
def lowerCamelCase ( self :str , __UpperCamelCase :int ):
return ParameterProjection(
in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ):
raise NotImplementedError()
@staticmethod
def lowerCamelCase ( __UpperCamelCase :torch.Tensor ):
return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
UpperCamelCase = StudentT
@classmethod
def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
A = 2.0 + cls.squareplus(__UpperCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"loc": 1, "scale": 1}
UpperCamelCase = Normal
@classmethod
def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"total_count": 1, "logits": 1}
UpperCamelCase = NegativeBinomial
@classmethod
def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ):
A, A = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase )
else:
return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ):
A, A = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 292 | 0 |
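# A standalone sketch of the parameter-projection pattern above: raw network
# outputs pass through `squareplus` so the Student-T scale and degrees of freedom
# stay in their valid ranges before the distribution is constructed.
import torch
from torch.distributions import StudentT


def squareplus(x: torch.Tensor) -> torch.Tensor:
    # smooth positivity map, used above as an alternative to softplus
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


raw = torch.randn(8, 3)  # stand-in for the three linear projections (df, loc, scale)
df = 2.0 + squareplus(raw[:, 0])  # df > 2 keeps the variance finite
loc = raw[:, 1]
scale = squareplus(raw[:, 2]).clamp_min(torch.finfo(raw.dtype).eps)
print(StudentT(df, loc, scale).sample().shape)  # torch.Size([8])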
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A__ = 16
A__ = 32
def _UpperCAmelCase ( snake_case , snake_case = 16 , snake_case = "bert-base-cased" ):
"""simple docstring"""
_lowerCAmelCase = AutoTokenizer.from_pretrained(snake_case )
_lowerCAmelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case , max_length=snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCAmelCase = datasets.map(
snake_case , batched=snake_case , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(snake_case , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
_lowerCAmelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
_lowerCAmelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
return train_dataloader, eval_dataloader
def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
model.eval()
_lowerCAmelCase = 0
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase = model(**snake_case )
_lowerCAmelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowerCAmelCase , _lowerCAmelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case ) - 1:
_lowerCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case , references=snake_case , )
_lowerCAmelCase = metric.compute()
return eval_metric["accuracy"]
def _UpperCAmelCase ( snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase = config["""lr"""]
_lowerCAmelCase = int(config["""num_epochs"""] )
_lowerCAmelCase = int(config["""seed"""] )
_lowerCAmelCase = int(config["""batch_size"""] )
_lowerCAmelCase = args.model_name_or_path
set_seed(snake_case )
_lowerCAmelCase , _lowerCAmelCase = get_dataloaders(snake_case , snake_case , snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(snake_case , return_dict=snake_case )
# Instantiate optimizer
_lowerCAmelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCAmelCase = optimizer_cls(params=model.parameters() , lr=snake_case )
if accelerator.state.deepspeed_plugin is not None:
_lowerCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_lowerCAmelCase = 1
_lowerCAmelCase = (len(snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=snake_case , num_warmup_steps=0 , num_training_steps=snake_case , )
else:
_lowerCAmelCase = DummyScheduler(snake_case , total_num_steps=snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase = 0
# We also need to keep track of the stating epoch so files are named properly
_lowerCAmelCase = 0
_lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" )
_lowerCAmelCase = num_epochs
if args.partial_train_epoch is not None:
_lowerCAmelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_lowerCAmelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
_lowerCAmelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_lowerCAmelCase = int(snake_case ) + 1
_lowerCAmelCase = evaluation_loop(snake_case , snake_case , snake_case , snake_case )
accelerator.print("""resumed checkpoint performance:""" , snake_case )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , """r""" ) as f:
_lowerCAmelCase = json.load(snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_lowerCAmelCase = {}
for epoch in range(snake_case , snake_case ):
model.train()
for step, batch in enumerate(snake_case ):
_lowerCAmelCase = model(**snake_case )
_lowerCAmelCase = outputs.loss
_lowerCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_lowerCAmelCase = F'epoch_{epoch}'
_lowerCAmelCase = os.path.join(args.output_dir , snake_case )
accelerator.save_state(snake_case )
_lowerCAmelCase = evaluation_loop(snake_case , snake_case , snake_case , snake_case )
_lowerCAmelCase = accuracy
_lowerCAmelCase = lr_scheduler.get_lr()[0]
_lowerCAmelCase = optimizer.param_groups[0]["""lr"""]
_lowerCAmelCase = epoch
_lowerCAmelCase = overall_step
accelerator.print(F'epoch {epoch}:' , snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , """w""" ) as f:
json.dump(snake_case , snake_case )
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=snake_case , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case , )
parser.add_argument(
"""--output_dir""" , type=snake_case , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=snake_case , default=snake_case , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=snake_case , default=snake_case , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=snake_case , default=2 , help="""Number of train epochs.""" , )
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(snake_case , snake_case )
if __name__ == "__main__":
main()
| 82 |
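# The checkpoint/resume flow above reduces to this sketch (the paths are
# hypothetical): save_state writes model, optimizer, scheduler and RNG state to
# one folder per epoch, and load_state restores them on a later run.
from accelerate import Accelerator

accelerator = Accelerator()
# ... after accelerator.prepare(...) and some training steps ...
accelerator.save_state("checkpoints/epoch_0")
# on resume:
accelerator.load_state("checkpoints/epoch_0")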
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
UpperCamelCase = None
def lowerCamelCase ( self :List[Any] ):
A = self.feature_extraction_class(**self.feat_extract_dict )
A = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
A = self.feature_extraction_class.from_json_file(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
A = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Tuple ):
A = self.feature_extraction_class()
self.assertIsNotNone(__UpperCamelCase )
| 292 | 0 |
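# The mixin above is driven by two class attributes that concrete test classes
# supply; a rough sketch of such a subclass (the extractor and config values are
# illustrative, borrowed from Wav2Vec2):
import unittest

from transformers import Wav2Vec2FeatureExtractor


class Wav2Vec2FeatureExtractionTest(unittest.TestCase):  # would also inherit the mixin above
    feature_extraction_class = Wav2Vec2FeatureExtractor
    feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}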
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
snake_case_ : List[Any] = logging.get_logger(__name__)
class lowercase__ ( lowercase ):
lowercase__ = """AutoTokenizer"""
lowercase__ = ["""tokenizer"""]
lowercase__ = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self : List[str] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Tuple=None ):
'''simple docstring'''
super().__init__(lowerCamelCase__ )
_UpperCamelCase : Dict = speaker_embeddings
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : str="speaker_embeddings_path.json" ,**lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
_UpperCamelCase : Optional[Any] = get_file_from_repo(
lowerCamelCase__ ,lowerCamelCase__ ,subfolder=kwargs.pop('subfolder' ,lowerCamelCase__ ) ,cache_dir=kwargs.pop('cache_dir' ,lowerCamelCase__ ) ,force_download=kwargs.pop('force_download' ,lowerCamelCase__ ) ,proxies=kwargs.pop('proxies' ,lowerCamelCase__ ) ,resume_download=kwargs.pop('resume_download' ,lowerCamelCase__ ) ,local_files_only=kwargs.pop('local_files_only' ,lowerCamelCase__ ) ,use_auth_token=kwargs.pop('use_auth_token' ,lowerCamelCase__ ) ,revision=kwargs.pop('revision' ,lowerCamelCase__ ) ,)
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowerCamelCase__ ,lowerCamelCase__ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
_UpperCamelCase : Union[str, Any] = None
else:
with open(lowerCamelCase__ ) as speaker_embeddings_json:
_UpperCamelCase : Optional[int] = json.load(lowerCamelCase__ )
else:
_UpperCamelCase : Tuple = None
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
return cls(tokenizer=lowerCamelCase__ ,speaker_embeddings=lowerCamelCase__ )
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int="speaker_embeddings_path.json" ,lowerCamelCase__ : Dict="speaker_embeddings" ,lowerCamelCase__ : bool = False ,**lowerCamelCase__ : Tuple ,):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCamelCase__ ,lowerCamelCase__ ,'v2' ) ,exist_ok=lowerCamelCase__ )
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCamelCase : Any = self._load_voice_preset(lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,lowerCamelCase__ ,F'{prompt_key}_{key}' ) ,voice_preset[key] ,allow_pickle=lowerCamelCase__ ,)
_UpperCamelCase : List[str] = os.path.join(lowerCamelCase__ ,F'{prompt_key}_{key}.npy' )
_UpperCamelCase : str = tmp_dict
with open(os.path.join(lowerCamelCase__ ,lowerCamelCase__ ) ,'w' ) as fp:
json.dump(lowerCamelCase__ ,lowerCamelCase__ )
super().save_pretrained(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str = None ,**lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : Tuple = self.speaker_embeddings[voice_preset]
_UpperCamelCase : Union[str, Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
_UpperCamelCase : Dict = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,lowerCamelCase__ ) ,cache_dir=kwargs.pop('cache_dir' ,lowerCamelCase__ ) ,force_download=kwargs.pop('force_download' ,lowerCamelCase__ ) ,proxies=kwargs.pop('proxies' ,lowerCamelCase__ ) ,resume_download=kwargs.pop('resume_download' ,lowerCamelCase__ ) ,local_files_only=kwargs.pop('local_files_only' ,lowerCamelCase__ ) ,use_auth_token=kwargs.pop('use_auth_token' ,lowerCamelCase__ ) ,revision=kwargs.pop('revision' ,lowerCamelCase__ ) ,)
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
_UpperCamelCase : List[str] = np.load(lowerCamelCase__ )
return voice_preset_dict
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self : Any ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Any="pt" ,lowerCamelCase__ : Dict=256 ,lowerCamelCase__ : int=False ,lowerCamelCase__ : int=True ,lowerCamelCase__ : List[str]=False ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
if voice_preset is not None and not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
if (
isinstance(lowerCamelCase__ ,lowerCamelCase__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCamelCase : Optional[int] = self._load_voice_preset(lowerCamelCase__ )
else:
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and not voice_preset.endswith('.npz' ):
_UpperCamelCase : Tuple = voice_preset + '.npz'
_UpperCamelCase : str = np.load(lowerCamelCase__ )
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = self.tokenizer(
lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,padding='max_length' ,max_length=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
if voice_preset is not None:
_UpperCamelCase : Optional[Any] = voice_preset
return encoded_text
| 83 |
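# Typical use of the processor above (it ships in transformers as BarkProcessor);
# the checkpoint and preset names follow the Bark documentation, so treat this
# as a sketch rather than a guaranteed recipe.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs.keys())  # tokenizer outputs plus "history_prompt" holding the voice preset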
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = RoFormerTokenizer
UpperCamelCase = RoFormerTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowerCamelCase ( self :List[str] ):
super().setUp()
def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ):
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ):
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Any ):
A = "永和服装饰品有限公司,今天天气非常好"
A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def lowerCamelCase ( self :int ):
A = self.get_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :str ):
A = self.get_rust_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :Any ):
pass
def lowerCamelCase ( self :Tuple ):
pass
def lowerCamelCase ( self :List[str] ):
pass
| 292 | 0 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
| 84 |
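# Worked example for the classic CLRS dimensions used in main() above: matrices
# A1..A6 of sizes 30x35, 35x15, 15x5, 5x10, 10x20, 20x25 need a minimum of
# 15125 scalar multiplications, parenthesized as ((A1(A2A3))((A4A5)A6)).
matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
print(matrix[1][6])  # 15125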
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 292 | 0 |
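# An equivalent regex-based one-liner for the conversion above (a sketch that
# skips the input validation):
import re


def snake_to_camel_case_re(s: str) -> str:
    return re.sub(r"_(\w)", lambda m: m.group(1).upper(), s)


print(snake_to_camel_case_re("some_random_string"))  # someRandomString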
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE : str = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 |
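# The lazy-import structure above boils down to this sketch: submodule imports
# are deferred until an attribute is first accessed (PEP 562 module __getattr__).
import importlib

_ATTR_TO_MODULE = {"LayoutXLMProcessor": ".processing_layoutxlm"}


def __getattr__(name):
    if name in _ATTR_TO_MODULE:
        module = importlib.import_module(_ATTR_TO_MODULE[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")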
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ):
A = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ):
super().__init__()
self.register_modules(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ):
if latents is None:
A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
A = latents.to(__UpperCamelCase )
A = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
A = torch.device(f"cuda:{gpu_id}" )
A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
A = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A = None
for cpu_offloaded_model in [self.unet, self.movq]:
A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase ( self :str ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ):
A = self._execution_device
A = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
A = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = hint.repeat_interleave(__UpperCamelCase , dim=0 )
A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
A = self.scheduler.timesteps
A = self.movq.config.latent_channels
A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
# create initial latent
A = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = {"image_embeds": image_embeds, "hint": hint}
A = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
A, A = noise_pred.split(latents.shape[1] , dim=1 )
A, A = noise_pred.chunk(2 )
A, A = variance_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A, A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
A = image * 0.5 + 0.5
A = image.clamp(0 , 1 )
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
| 292 | 0 |
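# The classifier-free guidance step inside the denoising loop above, in
# isolation: the batch holds (unconditional, conditional) predictions that are
# split apart and blended with the guidance scale.
import torch

noise_pred = torch.randn(2, 4, 64, 64)  # stand-in for the doubled U-Net output
guidance_scale = 4.0
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 64, 64])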
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = Dict[str, Any]
lowerCamelCase__ = List[Prediction]
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = {}
if "threshold" in kwargs:
__lowerCAmelCase : int = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = load_image(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.IntTensor([[image.height, image.width]] )
__lowerCAmelCase : int = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
__lowerCAmelCase : Tuple = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
__lowerCAmelCase : str = target_size
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('target_size' )
__lowerCAmelCase : int = self.model(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
__lowerCAmelCase : Dict = model_inputs['bbox']
return model_outputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.9 ):
__lowerCAmelCase : Union[str, Any] = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__lowerCAmelCase , __lowerCAmelCase : int = target_size[0].tolist()
def unnormalize(_SCREAMING_SNAKE_CASE ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__lowerCAmelCase : Optional[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__lowerCAmelCase : Any = [unnormalize(_SCREAMING_SNAKE_CASE ) for bbox in model_outputs['bbox'].squeeze(0 )]
__lowerCAmelCase : List[str] = ['score', 'label', 'box']
__lowerCAmelCase : Tuple = [dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for vals in zip(scores.tolist() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__lowerCAmelCase : Tuple = self.image_processor.post_process_object_detection(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = raw_annotations[0]
__lowerCAmelCase : Dict = raw_annotation['scores']
__lowerCAmelCase : Dict = raw_annotation['labels']
__lowerCAmelCase : int = raw_annotation['boxes']
__lowerCAmelCase : Any = scores.tolist()
__lowerCAmelCase : Any = [self.model.config.idalabel[label.item()] for label in labels]
__lowerCAmelCase : Optional[int] = [self._get_bounding_box(_SCREAMING_SNAKE_CASE ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__lowerCAmelCase : List[Any] = ['score', 'label', 'box']
__lowerCAmelCase : str = [
dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = box.int().tolist()
__lowerCAmelCase : str = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox | 86 |
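# End-to-end use of the pipeline above; the checkpoint is one common example,
# and any object-detection model on the Hub should work the same way.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for pred in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
    print(pred["label"], pred["score"], pred["box"])  # box = {xmin, ymin, xmax, ymax}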
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A = (image_size // patch_size) ** 2
A = num_patches + 1
def lowerCamelCase ( self :Any ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self :Union[str, Any] ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ):
A = ViTMSNModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ):
A = self.type_sequence_label_size
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , labels=__UpperCamelCase )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase ( self :Optional[Any] ):
A = self.prepare_config_and_inputs()
A, A, A = config_and_inputs
A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :Optional[int] ):
A = ViTMSNModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def lowerCamelCase ( self :Union[str, Any] ):
pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
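# Note on the integration check above: it pins three logits of
# facebook/vit-msn-small under a fixed seed, so any change in head
# initialization or preprocessing surfaces as an atol=1e-4 failure.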
| 292 | 0 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_aaa_movies( url : str = ""):
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url ).text , "html.parser")
    titles = soup.find_all("td" , attrs="titleColumn")
    ratings = soup.find_all("td" , class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies( filename : str = "IMDb_Top_250_Movies.csv"):
    movies = get_imdb_top_aaa_movies()
    with open(filename , "w" , newline="") as out_file:
        writer = csv.writer(out_file )
writer.writerow(["Movie title", "IMDb rating"])
for title, rating in movies.items():
writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
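# A quick usage sketch (requires network access, and the IMDb page layout must
# still match the selectors above; the filename is illustrative):
#
#   movies = get_imdb_top_aaa_movies()
#   len(movies)                 # typically 250 titles
#   write_movies("top250.csv")  # one "Movie title,IMDb rating" row per film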
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig( PretrainedConfig ):
    model_type = '''vivit'''
    def __init__( self , image_size=2_24 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
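# A minimal usage sketch (values mirror the defaults above):
#
#   config = VivitConfig(num_frames=32, tubelet_size=[2, 16, 16])
#   config.model_type   # -> "vivit"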
| 292 | 0 |
def sum_of_digits( n ):
    '''simple docstring'''
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion( n ):
    '''simple docstring'''
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact( n ):
    '''simple docstring'''
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark( ):
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup="""import __main__""" )
        print(f'''{call:56} = {func(value )} -- {timing:.4f} seconds''' )
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
    print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
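    # Quick sanity check in addition to the benchmark (values worked out by
    # hand): 1 + 2 + 3 + 4 + 5 == 15, and the sign is dropped via abs().
    assert sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15
    assert sum_of_digits(-12345) == 15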
| 88 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs( self , seed=0 ):
        image = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    @property
    def gpu_provider( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((7_68, 5_12) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((7_68, 5_12) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
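# Note on the tolerances above: ONNX Runtime kernels are not bitwise
# reproducible across builds and GPUs, hence the loose 2e-2 bound on the
# sampled image slices instead of an exact comparison.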
| 292 | 0 |
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> bool:
    bitmap = 0
    for ch in lowerCAmelCase_:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
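    # Worked example (strings chosen by hand): "abc" touches bits 97, 98 and 99
    # exactly once, while "aba" revisits bit 97, which the bitmap test catches.
    assert __lowerCamelCase("abc") is True
    assert __lowerCamelCase("aba") is False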
| 89 |
"""simple docstring"""
def A__ ( UpperCamelCase ):
A = generate_pascal_triangle(UpperCamelCase )
for row_idx in range(UpperCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = []
for current_row_idx in range(UpperCamelCase ):
A = populate_current_row(UpperCamelCase , UpperCamelCase )
triangle.append(UpperCamelCase )
return triangle
def A__ ( UpperCamelCase , UpperCamelCase ):
A = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A, A = 1, 1
for current_col_idx in range(1 , UpperCamelCase ):
calculate_current_element(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return current_row
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
A = triangle[current_row_idx - 1][current_col_idx - 1]
A = triangle[current_row_idx - 1][current_col_idx]
A = above_to_left_elt + above_to_right_elt
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = [[1]]
for row_index in range(1 , UpperCamelCase ):
A = [0] + result[-1] + [0]
A = row_index + 1
# Calculate the number of distinct elements in a row
A = sum(divmod(UpperCamelCase , 2 ) )
A = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A = row_first_half + row_second_half
result.append(UpperCamelCase )
return result
def A__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None:
A = F"{func.__name__}({value})"
A = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCamelCase , UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 292 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    """simple docstring"""
    common : CommonSchedulerState
    # setable values
    init_noise_sigma : jnp.ndarray
    timesteps : jnp.ndarray
    num_inference_steps : Optional[int] = None
@classmethod
    def create( cls , common : CommonSchedulerState , init_noise_sigma : jnp.ndarray , timesteps : jnp.ndarray ):
        '''simple docstring'''
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput( FlaxSchedulerOutput ):
    """simple docstring"""
    state : DDPMSchedulerState
class FlaxDDPMScheduler( FlaxSchedulerMixin , ConfigMixin ):
    """simple docstring"""
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype : jnp.dtype
    @property
    def has_state( self ) -> bool:
        '''simple docstring'''
        return True
@register_to_config
    def __init__( self , num_train_timesteps = 1_000 , beta_start = 0.00_01 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , variance_type = "fixed_small" , clip_sample = True , prediction_type = "epsilon" , dtype = jnp.float32 , ):
        '''simple docstring'''
        self.dtype = dtype
    def create_state( self , common = None ) -> DDPMSchedulerState:
        '''simple docstring'''
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input( self , state : DDPMSchedulerState , sample : jnp.ndarray , timestep = None ) -> jnp.ndarray:
        '''simple docstring'''
        return sample
    def set_timesteps( self , state : DDPMSchedulerState , num_inference_steps : int , shape = () ) -> DDPMSchedulerState:
        '''simple docstring'''
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance( self , state : DDPMSchedulerState , t , predicted_variance=None , variance_type=None ):
        '''simple docstring'''
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , state : DDPMSchedulerState , model_output : jnp.ndarray , timestep : int , sample : jnp.ndarray , key = None , return_dict = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        '''simple docstring'''
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or `v_prediction`"""
                ' for the FlaxDDPMScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise( self , state : DDPMSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ) -> jnp.ndarray:
        '''simple docstring'''
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity( self , state : DDPMSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ) -> jnp.ndarray:
        '''simple docstring'''
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
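# A minimal denoising-loop sketch (not part of this file; `unet_apply` is a
# hypothetical model call and `sample` starts as pure noise):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       model_output = unet_apply(sample, t)
#       sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)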
| 90 |
"""simple docstring"""
import math
import sys
def read_file_binary( UpperCamelCase ):
    result = ""
    try:
        with open(UpperCamelCase , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F"{dat:08b}"
            result += curr_byte
        return result
except OSError:
print("File not accessible" )
sys.exit()
def decompress_data( data_bits ):
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary( UpperCamelCase , to_write ):
    byte_length = 8
    try:
        with open(UpperCamelCase , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def remove_prefix( data_bits ):
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path , destination_path ):
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
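# Decoding sketch for the variable-width scheme handled above (bit strings
# chosen by hand): with the initial lexicon {"0": "0", "1": "1"}, the stream
# "10" first matches "1", emits it, and stores a new code; whenever `index`
# reaches a power of two every key gains a leading "0", so codes grow one bit
# wider, mirroring the growth of the encoder's dictionary.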
| 292 | 0 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number ) -> list[int]:
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution(base = 80_08_00 , degree = 80_08_00 ) -> int:
    """simple docstring"""
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
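# Why the loop condition works (one reasoning step made explicit):
# p**q * q**p <= base**degree  iff  q*log2(p) + p*log2(q) <= degree*log2(base),
# and the right-hand side is exactly `upper_bound`, so the two-pointer scan
# only ever compares logarithms and never forms the huge powers themselves.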
| 91 |
"""simple docstring"""
class Node:
    def __init__( self , name , val ):
        self.name = name
        self.val = val
    def __str__( self ):
        return f"{self.__class__.__name__}({self.name}, {self.val})"
    def __lt__( self , other ):
        return self.val < other.val
class MinHeap:
    def __init__( self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
    def __getitem__( self , key ):
        return self.get_value(key )
    def get_parent_idx( self , idx ):
        return (idx - 1) // 2
    def get_left_child_idx( self , idx ):
        return idx * 2 + 1
    def get_right_child_idx( self , idx ):
        return idx * 2 + 2
    def get_value( self , key ):
        return self.heap_dict[key]
    def build_heap( self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array
    def sift_down( self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek( self ):
        return self.heap[0]
    def remove( self ):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x
    def insert( self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty( self ):
        return len(self.heap ) == 0
    def decrease_key( self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
_snake_case : Optional[int] = Node('R', -1)
_snake_case : Tuple = Node('B', 6)
_snake_case : Tuple = Node('A', 3)
_snake_case : Optional[int] = Node('X', 1)
_snake_case : List[Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_snake_case : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
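# One more usage sketch: remove() pops the current minimum by swapping the root
# with the last leaf, popping it, and sifting down. After the decrease_key
# above, B (val -17) sits at the root, so this prints Node(B, -17).
print('Removed min:', my_min_heap.remove())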
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level( ):
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level
def _get_library_name( ):
    return __name__.split("." )[0]
def _get_library_root_logger( ):
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger( ):
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler() # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger( ):
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict( ):
    return log_levels
def get_logger( name : Optional[str] = None ):
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity( ):
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity( verbosity : int ):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info( ):
    return set_verbosity(INFO )
def set_verbosity_warning( ):
    return set_verbosity(WARNING )
def set_verbosity_debug( ):
    return set_verbosity(DEBUG )
def set_verbosity_error( ):
    return set_verbosity(ERROR )
def disable_default_handler( ):
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler( ):
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler( handler : logging.Handler ):
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler( handler : logging.Handler ):
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation( ):
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation( ):
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format( ):
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
        handler.setFormatter(formatter )
def reset_format( ):
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice( self , *args , **kwargs ):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once( self , *args , **kwargs ):
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    def __init__( self , *args , **kwargs ): # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
    def __iter__( self ):
        """simple docstring"""
        return iter(self._iterator )
    def __getattr__( self , _ ):
        """simple docstring"""
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        """simple docstring"""
        return self
    def __exit__( self , type_ , value , traceback ):
        """simple docstring"""
        return
class _tqdm_cls:
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled( ):
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bars( ):
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars( ):
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
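# A minimal usage sketch (module name and message are illustrative):
#
#   logger = get_logger(__name__)
#   set_verbosity(INFO)                  # or set_verbosity_info()
#   logger.info("loading weights...")
#   enable_explicit_format()             # "[INFO|file.py:42] time >> message"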
| 92 |
"""simple docstring"""
from __future__ import annotations
solution = []
def is_safe( board , row , column ):
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve( board , row ):
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard( board ):
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print("Q" , end=" " )
            else:
                print("." , end=" " )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
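# Reference check (a classical result, not computed here): the 8x8 board has
# exactly 92 solutions, so the line above should report 92; with n = 4 the
# solver prints the two symmetric solutions instead.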
| 292 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def snake_case_ ( vl : list , wt : list , w : int , n : int ):
    """simple docstring"""
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
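    # Worked example (values chosen by hand): value/weight ratios are 6, 5, 4,
    # so items one and two are taken whole (60 + 100) plus 20/30 of item three
    # (80), for a total of 240.0 within the weight limit of 50.
    assert snake_case_([60, 100, 120], [10, 20, 30], 50, 3) == 240.0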
| 93 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
def hashimage( image ):
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self , depth_estimator , examples ):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , outputs )
        import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        outputs = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
    def test_small_model_tf( self ):
pass
@slow
@require_torch
    def test_large_model_pt( self ):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation" , model=model_id )
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
        outputs["depth"] = hashimage(outputs["depth"] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt( self ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 292 | 0 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 94 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests( unittest.TestCase ):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''
@cached_property
    def tokenizer( self ):
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words
    def translate_src_text( self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation( self ):
        self._assert_generated_batch_equal_expected()
| 292 | 0 |
def _A ( input_num : int ):
    """simple docstring"""
    if not isinstance(input_num , int ):
        raise ValueError("Input must be an integer" )
    if input_num <= 0:
        raise ValueError("Input must be positive" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
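    # Worked example (computed by hand): the proper divisors of 28 are
    # 1, 2, 4, 7 and 14, and they sum back to 28, making it a perfect number.
    assert _A(28) == 28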
| 95 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product = "laptop" ):
    url = F"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
except AttributeError:
pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "MRP of the product",
    ] = " "
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "Current Price of the product",
    ] = " "
    data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term( a_i , k , i , n ):
    # digit sums of the high part b and the value of the low part c of a_i = b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff , dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff , dn , kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(kk , len(a_i ) ) ):
                    new_c , a_i[j] = divmod(new_c , 10 )
                if new_c > 0:
                    add(a_i , kk , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff , terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff , terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute( a_i , k , i , n ):
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b , ds_c , diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend , a_i[j] = divmod(s , 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add( digits , k , addend ):
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient , digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend , digit = divmod(addend , 10 )
        digits.append(digit )
def solution( n = 10**15 ):
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff , terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"{solution() = }") | 96 |
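# For orientation (value quoted from the Project Euler 551 statement): the
# sequence starts 1, 2, 4, 8, 16, 23, 28, 38, 49, ... and a(10**6) = 31054319;
# solution() extends this to n = 10**15 by caching "jumps" keyed on the digit
# sum of the high part, so long stretches of terms are skipped at once.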
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate: int = 16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio prompt with Whisper
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 292 | 0 |
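# A standalone sketch of the classifier-free guidance step used by the pipeline
# above: guided = uncond + scale * (text - uncond). The tensor shapes here are
# illustrative assumptions, not tied to any particular checkpoint.
import torch

noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] stacked along the batch dim
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)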
'''simple docstring'''
def count_divisors(n: int) -> int:
    """Counts the divisors of n via its prime factorisation:
    d(p1^e1 * ... * pk^ek) = (e1 + 1) * ... * (ek + 1)."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Returns the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution()) | 97 |
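# Quick sanity check of the divisor-count identity used above, assuming
# count_divisors from this sample is in scope: 28 = 2^2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6
assert count_divisors(1) == 1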
"""simple docstring"""
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 0 |
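# The `__all__` list above pins down exactly what `from datasets.download import *`
# re-exports. A minimal illustration of the same pattern with hypothetical names:
__all__ = ["PublicThing"]


class PublicThing: ...


class _InternalThing: ...  # excluded from star-imports because it is not listed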
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # sync the backend pre-tokenizer's add_prefix_space flag with the argument
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation, keeping only the newest tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 98 |
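# The _build_conversation_input_ids method above keeps only the newest tokens when a
# conversation outgrows model_max_length. The same tail-truncation in isolation
# (the length below is an illustrative assumption):
model_max_length = 8
input_ids = list(range(12))
if len(input_ids) > model_max_length:
    input_ids = input_ids[-model_max_length:]
assert input_ids == [4, 5, 6, 7, 8, 9, 10, 11]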
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.embed_tokens.weight"] if False else state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 292 | 0 |
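# make_linear_from_emb above ties an LM head to the token embedding by pointing the
# linear layer at the embedding's weight tensor. The same steps on a toy embedding
# (the sizes are arbitrary assumptions):
import torch
from torch import nn

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(vocab_size, emb_size, bias=False)
lm_head.weight.data = emb.weight.data
assert torch.equal(lm_head.weight, emb.weight)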
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to feed the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 99 |
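# The nested-text test above pads every per-image query list up to the longest one,
# so the processor returns batch_size * num_max_text_queries rows. The arithmetic
# in isolation:
input_texts = [["cat", "nasa badge"], ["person"]]
batch_size = len(input_texts)                                    # 2
num_max_text_queries = max(len(texts) for texts in input_texts)  # 2
assert batch_size * num_max_text_queries == 4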
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Counts hybrid integers p**q * q**p <= base**degree for primes p < q."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 292 | 0 |
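# The solution above compares logarithms instead of astronomically large powers:
# p**q * q**p <= base**degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base).
# A check of that equivalence with a deliberately tiny bound (not the problem's):
from math import log2

base, degree = 10, 5
p, q = 2, 3
lhs = q * log2(p) + p * log2(q)
assert (lhs <= degree * log2(base)) == (p**q * q**p <= base**degree)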
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 100 |
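# A brief instantiation sketch for the config defined above (values are
# illustrative assumptions; the defaults asserted here match the signature in
# this sample):
config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
assert config.context_length == 24  # falls back to prediction_length
assert config.d_model == 64         # default Transformer width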
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 292 | 0 |
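# The _LazyModule pattern above defers importing torch-heavy submodules until an
# attribute is first accessed. A minimal standalone sketch of the same idea
# (hypothetical names, not the transformers implementation):
import importlib
import types


class LazySubmodules(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item: str):
        # import the owning submodule lazily, only when `item` is looked up
        for submodule, exported in self._import_structure.items():
            if item in exported:
                module = importlib.import_module(submodule)
                return getattr(module, item)
        raise AttributeError(item)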