Dataset preview (flattened table). Columns, one row per example:
code: string, 82 to 53.2k chars | code_codestyle: int64, 0 to 721 | style_context: string, 91 to 41.9k chars | style_context_codestyle: int64, 0 to 699 | label: int64, 0 or 1

Row 1, code cell:
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    """Solve the impedance triangle Z**2 = R**2 + X**2 for whichever of the
    three arguments is given as 0, returning it as a name/value dict."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
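A quick usage sketch (editor's addition, not part of the original file; the values follow directly from Z**2 = R**2 + X**2):

# Hypothetical demo calls, assuming electrical_impedance() as defined above:
print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}
print(electrical_impedance(3, 0, 5))  # {'reactance': 4.0}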
[code_codestyle: 345]

Row 1, style_context cell:
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
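For context (editor's addition): TextDatasetReader is the internal class behind the public "text" loading script; a user-level equivalent of what these tests construct is:

# Hypothetical sketch; requires a local plain-text file named my_file.txt.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_file.txt"})["train"]
print(ds.column_names)  # ['text']; one example per line of the file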
[style_context_codestyle: 345 | label: 1]

Row 2, code cell:
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the element at `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Restore the heap upwards after the value of a node decreases.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree of the graph (Prim's algorithm)."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
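A small worked example (editor's addition; the graph and its output are illustrative, not from the original file):

# Hypothetical demo: a 4-vertex weighted graph in the same
# {vertex: [[neighbor, weight], ...]} shape the __main__ block builds.
demo = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 1), (0, 3, 4)]:
    demo[u].append([v, w])
    demo[v].append([u, w])
print(prisms_algorithm(demo))  # [(0, 1), (1, 2), (2, 3)]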
[code_codestyle: 712]

Row 2, style_context cell:
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times `term` appears (case-insensitively) in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing `term`, total documents); the corpus has one
    document per line."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), optionally with add-one smoothing of df."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
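A short end-to-end sketch (editor's addition; the three-document corpus is made up for illustration):

# Hypothetical demo: score the term "cat" against a corpus of one document per line.
corpus = "the cat sat\nthe dog ran\na cat and a cat"
tf = term_frequency("cat", "the cat sat")  # 1
df, n = document_frequency("cat", corpus)  # (2, 3)
idf = inverse_document_frequency(df, n)    # round(log10(3 / 2), 3)
print(tf_idf(tf, idf))                     # 0.176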
[style_context_codestyle: 510 | label: 0]

Row 3, code cell:
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_a = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_a = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_a = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowerCamelCase__ ( __snake_case, __snake_case ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase = len([g for position, g in enumerate(__snake_case ) if g == main_target[position]] )
return (item, float(__snake_case ))
def lowerCamelCase__ ( __snake_case, __snake_case ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase = random.randint(0, len(__snake_case ) - 1 )
_UpperCamelCase = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCamelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowerCamelCase__ ( __snake_case, __snake_case ) -> str:
"""simple docstring"""
_UpperCamelCase = list(__snake_case )
if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
_UpperCamelCase = random.choice(__snake_case )
return "".join(__snake_case )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, ) -> list[str]:
"""simple docstring"""
_UpperCamelCase = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase = int(parent_a[1] * 1_00 ) + 1
_UpperCamelCase = 10 if child_n >= 10 else child_n
for _ in range(__snake_case ):
_UpperCamelCase = population_score[random.randint(0, __snake_case )][0]
_UpperCamelCase , _UpperCamelCase = crossover(parent_a[0], __snake_case )
# Append new string to the population list.
pop.append(mutate(__snake_case, __snake_case ) )
pop.append(mutate(__snake_case, __snake_case ) )
return pop
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(__snake_case )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCamelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(__snake_case )
# Generate random starting population.
_UpperCamelCase = []
for _ in range(__snake_case ):
population.append(''''''.join([random.choice(__snake_case ) for i in range(len(__snake_case ) )] ) )
# Just some logs to know what the algorithms is doing.
_UpperCamelCase , _UpperCamelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__snake_case )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase = [evaluate(__snake_case, __snake_case ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase = sorted(__snake_case, key=lambda __snake_case : x[1], reverse=__snake_case )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_UpperCamelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__snake_case )
# Normalize population score to be between 0 and 1.
_UpperCamelCase = [
(item, score / len(__snake_case )) for item, score in population_score
]
# This is selection
for i in range(__snake_case ):
population.extend(select(population_score[int(__snake_case )], __snake_case, __snake_case ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__snake_case ) > N_POPULATION:
break
if __name__ == "__main__":
_a = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
_a = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
_a , _a , _a = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
[code_codestyle: 19]

Row 3, style_context cell:
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
A = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
A = F'https://www.google.com/search?q={query}&num=100'
A = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
A = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
A = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
[style_context_codestyle: 52 | label: 0]

Row 4, code cell:
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 111...1 (k ones) is
    divisible by `divisor`, or 0 if no such repunit exists."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 129: least divisor coprime to 10 whose least divisible
    repunit is longer than `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
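A quick sanity check (editor's addition, not from the original file): 111111 = 7 * 15873, so the smallest repunit divisible by 7 has six ones.

# Hypothetical demo call, assuming the functions above:
print(least_divisible_repunit(7))  # 6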
[code_codestyle: 450]

Row 4, style_context cell:
import shutil
import tempfile
import unittest

from transformers import (
    SPIECE_UNDERLINE,
    AddedToken,
    BatchEncoding,
    NllbTokenizer,
    NllbTokenizerFast,
    is_torch_available,
)
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145


@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    # overwrite from test_tokenization_common to speed up test
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)


@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
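For orientation (editor's addition, not part of the test file): the tokenizer API these tests exercise reduces to a few calls; this sketch assumes network access to fetch the checkpoint.

# Hypothetical demo of the tokenizer under test:
from transformers import NllbTokenizer

tok = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)
enc = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# The eng_Latn language-code token (id 256047) and EOS (id 2) are added automatically.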
[style_context_codestyle: 450 | label: 1]

Row 5, code cell:
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches=10, n_valid_batches=2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
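A minimal save/resume sketch (editor's addition; a reduction of the tests above to the core Accelerator calls, with a made-up directory name):

# Hypothetical sketch, assuming `model` and `optimizer` already exist.
import os
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

accelerator = Accelerator(
    project_dir="ckpts", project_config=ProjectConfiguration(automatic_checkpoint_naming=True)
)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state()  # writes ckpts/checkpoints/checkpoint_0
accelerator.load_state(os.path.join("ckpts", "checkpoints", "checkpoint_0"))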
[code_codestyle: 280]

Row 5, style_context cell:
import coval  # From: git+https://github.com/ns-moosavi/coval.git  # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities This column identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 72
| 0
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = AudioLDMPipeline
UpperCAmelCase = TEXT_TO_AUDIO_PARAMS
UpperCAmelCase = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCAmelCase = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_a , )
_SCREAMING_SNAKE_CASE =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_SCREAMING_SNAKE_CASE =ClapTextModelWithProjection(_a )
_SCREAMING_SNAKE_CASE =RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
_SCREAMING_SNAKE_CASE =SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_a , )
_SCREAMING_SNAKE_CASE =SpeechTaHifiGan(_a )
_SCREAMING_SNAKE_CASE ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def __UpperCamelCase ( self : Tuple , _a : List[Any] , _a : str=0 ) -> Tuple:
"""simple docstring"""
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =AudioLDMPipeline(**_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe(**_a )
_SCREAMING_SNAKE_CASE =output.audios[0]
assert audio.ndim == 1
assert len(_a ) == 256
_SCREAMING_SNAKE_CASE =audio[:10]
_SCREAMING_SNAKE_CASE =np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =AudioLDMPipeline(**_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =3 * [inputs['''prompt''']]
# forward
_SCREAMING_SNAKE_CASE =audioldm_pipe(**_a )
_SCREAMING_SNAKE_CASE =output.audios[0]
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =3 * [inputs.pop('''prompt''' )]
_SCREAMING_SNAKE_CASE =audioldm_pipe.tokenizer(
_a , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE =text_inputs['''input_ids'''].to(_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.text_encoder(
_a , )
_SCREAMING_SNAKE_CASE =prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_SCREAMING_SNAKE_CASE =F.normalize(_a , dim=-1 )
_SCREAMING_SNAKE_CASE =prompt_embeds
# forward
_SCREAMING_SNAKE_CASE =audioldm_pipe(**_a )
_SCREAMING_SNAKE_CASE =output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =AudioLDMPipeline(**_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =3 * ['''this is a negative prompt''']
_SCREAMING_SNAKE_CASE =negative_prompt
_SCREAMING_SNAKE_CASE =3 * [inputs['''prompt''']]
# forward
_SCREAMING_SNAKE_CASE =audioldm_pipe(**_a )
_SCREAMING_SNAKE_CASE =output.audios[0]
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =3 * [inputs.pop('''prompt''' )]
_SCREAMING_SNAKE_CASE =[]
for p in [prompt, negative_prompt]:
_SCREAMING_SNAKE_CASE =audioldm_pipe.tokenizer(
_a , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE =text_inputs['''input_ids'''].to(_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.text_encoder(
_a , )
_SCREAMING_SNAKE_CASE =text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_SCREAMING_SNAKE_CASE =F.normalize(_a , dim=-1 )
embeds.append(_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =embeds
# forward
_SCREAMING_SNAKE_CASE =audioldm_pipe(**_a )
_SCREAMING_SNAKE_CASE =output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_a )
_SCREAMING_SNAKE_CASE =AudioLDMPipeline(**_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE ='''egg cracking'''
_SCREAMING_SNAKE_CASE =audioldm_pipe(**_a , negative_prompt=_a )
_SCREAMING_SNAKE_CASE =output.audios[0]
assert audio.ndim == 1
assert len(_a ) == 256
_SCREAMING_SNAKE_CASE =audio[:10]
_SCREAMING_SNAKE_CASE =np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_a )
_SCREAMING_SNAKE_CASE =AudioLDMPipeline(**_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE ='''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
_SCREAMING_SNAKE_CASE =audioldm_pipe(_a , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =audioldm_pipe(_a , num_inference_steps=2 , num_waveforms_per_prompt=_a ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_a ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =AudioLDMPipeline(**_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.vocoder.config.sampling_rate
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe(audio_length_in_s=0.0_16 , **_a )
_SCREAMING_SNAKE_CASE =output.audios[0]
assert audio.ndim == 1
assert len(_a ) / vocoder_sampling_rate == 0.0_16
_SCREAMING_SNAKE_CASE =audioldm_pipe(audio_length_in_s=0.0_32 , **_a )
_SCREAMING_SNAKE_CASE =output.audios[0]
assert audio.ndim == 1
assert len(_a ) / vocoder_sampling_rate == 0.0_32
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =AudioLDMPipeline(**_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =['''hey''']
_SCREAMING_SNAKE_CASE =audioldm_pipe(_a , num_inference_steps=1 )
_SCREAMING_SNAKE_CASE =output.audios.shape
assert audio_shape == (1, 256)
_SCREAMING_SNAKE_CASE =audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_SCREAMING_SNAKE_CASE =SpeechTaHifiGan(_a ).to(_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe(_a , num_inference_steps=1 )
_SCREAMING_SNAKE_CASE =output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_a )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_a )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a )
@slow
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Union[str, Any] , _a : Tuple , _a : Optional[int]="cpu" , _a : Optional[int]=torch.floataa , _a : Tuple=0 ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE =np.random.RandomState(_a ).standard_normal((1, 8, 128, 16) )
_SCREAMING_SNAKE_CASE =torch.from_numpy(_a ).to(device=_a , dtype=_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_inputs(_a )
_SCREAMING_SNAKE_CASE =25
_SCREAMING_SNAKE_CASE =audioldm_pipe(**_a ).audios[0]
assert audio.ndim == 1
assert len(_a ) == 8_1920
_SCREAMING_SNAKE_CASE =audio[7_7230:7_7240]
_SCREAMING_SNAKE_CASE =np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
_SCREAMING_SNAKE_CASE =np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
_SCREAMING_SNAKE_CASE =LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_SCREAMING_SNAKE_CASE =audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_inputs(_a )
_SCREAMING_SNAKE_CASE =audioldm_pipe(**_a ).audios[0]
assert audio.ndim == 1
assert len(_a ) == 8_1920
_SCREAMING_SNAKE_CASE =audio[2_7780:2_7790]
_SCREAMING_SNAKE_CASE =np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
_SCREAMING_SNAKE_CASE =np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 191
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCamelCase( a__):
for param in module.parameters():
_SCREAMING_SNAKE_CASE =False
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_SCREAMING_SNAKE_CASE ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''')
return device
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =plt.imshow(a__)
fig.axes.get_xaxis().set_visible(a__)
fig.axes.get_yaxis().set_visible(a__)
plt.show()
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =datetime.now()
_SCREAMING_SNAKE_CASE =current_time.strftime('''%H:%M:%S''')
return timestamp
| 191
| 1
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 645
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 645
| 1
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
a_ : Dict = trt.Logger(trt.Logger.WARNING)
a_ : int = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
a_ : Optional[int] = logging.getLogger(__name__)
a_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
a_ : str = parser.parse_args()
if args.tokenizer_name:
a_ : int = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
a_ : Tuple = args.per_device_eval_batch_size
a_ : Optional[Any] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
a_ : Optional[Any] = True
a_ : List[Any] = 'temp_engine/bert-fp32.engine'
if args.fpaa:
a_ : Any = 'temp_engine/bert-fp16.engine'
if args.inta:
a_ : int = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
a_ : List[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
a_ : List[str] = [network.get_input(i) for i in range(network.num_inputs)]
a_ : int = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
a_ : Optional[Any] = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
a_ : List[str] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
a_ : List[str] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
__magic_name__ = np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
__magic_name__ = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
__magic_name__ = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , snake_case_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , snake_case_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , snake_case_ )
# start time
__magic_name__ = time.time()
# Run inference
context.execute_async(
bindings=[int(snake_case_ ) for d_inp in d_inputs] + [int(snake_case_ ), int(snake_case_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(snake_case_ , snake_case_ , snake_case_ )
cuda.memcpy_dtoh_async(snake_case_ , snake_case_ , snake_case_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
__magic_name__ = time.time()
__magic_name__ = end_time - start_time
__magic_name__ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
a_ : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
a_ : Dict = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
a_ : Tuple = raw_datasets['validation'].column_names
a_ : Optional[int] = 'question' if 'question' in column_names else column_names[0]
a_ : List[Any] = 'context' if 'context' in column_names else column_names[1]
a_ : Optional[Any] = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
a_ : Optional[int] = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
a_ : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lots of space). So we remove that
# left whitespace
__magic_name__ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__magic_name__ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=snake_case_ , stride=args.doc_stride , return_overflowing_tokens=snake_case_ , return_offsets_mapping=snake_case_ , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__magic_name__ = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__magic_name__ = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__magic_name__ = tokenized_examples.sequence_ids(snake_case_ )
__magic_name__ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__magic_name__ = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__magic_name__ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
a_ : Dict = raw_datasets['validation']
# Validation Feature Creation
a_ : int = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
a_ : List[str] = default_data_collator
a_ : Tuple = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
a_ : Optional[int] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Dict="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
__magic_name__ = postprocess_qa_predictions(
examples=snake_case_ , features=snake_case_ , predictions=snake_case_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=snake_case_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__magic_name__ = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
__magic_name__ = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
__magic_name__ = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=snake_case_ , label_ids=snake_case_ )
a_ : Any = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
return trt.volume(engine.get_binding_shape(snake_case_ ) ) * engine.get_binding_dtype(snake_case_ ).itemsize
# Allocate device memory for inputs and outputs.
a_ : Any = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
a_ : Tuple = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
a_ : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
a_ : Union[str, Any] = cuda.mem_alloc(h_outputa.nbytes)
a_ : List[str] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
a_ : Optional[int] = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
a_ : Tuple = 0.0
a_ : List[Any] = 0
a_ : int = timeit.default_timer()
a_ : Dict = None
for step, batch in enumerate(eval_dataloader):
a_ : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
a_ : Union[str, Any] = outputs
a_ : Any = torch.tensor(start_logits)
a_ : Tuple = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
a_ : List[str] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
a_ : List[str] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
a_ : str = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
a_ : Optional[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
a_ : Optional[Any] = nested_truncate(all_preds, len(eval_dataset))
a_ : Optional[Any] = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
logger.info('Total Number of Inference = %d', niter)
a_ : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
a_ : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 700
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : List[str] = 256
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_a = ["""melgan"""]
def __init__( self , A , A , A , A , A , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__magic_name__ = math.log(1E-5 ) # Matches MelGAN training.
__magic_name__ = 4.0 # Largest value for most examples
__magic_name__ = 1_28
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = output_range
if clip:
__magic_name__ = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
__magic_name__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ = input_range
__magic_name__ = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
__magic_name__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = input_tokens > 0
__magic_name__ , __magic_name__ = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
__magic_name__ , __magic_name__ = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __A ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = noise_time
if not torch.is_tensor(A ):
__magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
__magic_name__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__magic_name__ = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(A )}.' )
__magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa )
__magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
__magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__magic_name__ = ones
__magic_name__ = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
__magic_name__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__magic_name__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample
__magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] )
__magic_name__ = mel[:1]
__magic_name__ = mel.cpu().float().numpy()
__magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__magic_name__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A )
| 678
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : int = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
_lowercase : Union[str, Any] = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
sd_pipe.set_scheduler('sample_euler' )
_lowercase : Any = 'A painting of a squirrel eating a burger'
_lowercase : Any = torch.manual_seed(0 )
_lowercase : Union[str, Any] = sd_pipe([prompt] , generator=_lowerCAmelCase , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
_lowercase : Tuple = output.images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowercase : Any = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ):
_lowercase : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_lowercase : List[str] = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
sd_pipe.set_scheduler('sample_euler' )
_lowercase : Optional[Any] = 'A painting of a squirrel eating a burger'
_lowercase : List[str] = torch.manual_seed(0 )
_lowercase : str = sd_pipe([prompt] , generator=_lowerCAmelCase , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
_lowercase : List[Any] = output.images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowercase : Union[str, Any] = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __a ( self ):
_lowercase : int = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_lowercase : Any = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
_lowercase : int = 'A painting of a squirrel eating a burger'
_lowercase : Tuple = torch.manual_seed(0 )
_lowercase : Optional[Any] = sd_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=_lowerCAmelCase , )
_lowercase : Any = output.images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowercase : List[Any] = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 66
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConvNextFeatureExtractor"]
UpperCamelCase = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : float, SCREAMING_SNAKE_CASE__ : float, SCREAMING_SNAKE_CASE__ : float, ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a (unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet
UpperCAmelCase_ : Dict = KarrasVeScheduler()
UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0]
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = KarrasVeScheduler()
UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 644
| 0
|
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowerCamelCase__ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=False ,lowercase_=True ) -> Tuple:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
_UpperCamelCase : List[Any] = cached_file(lowercase_ ,lowercase_ ,force_download=not use_cached_models )
_UpperCamelCase : List[str] = config_class.from_json_file(lowercase_ )
_UpperCamelCase : Any = True
_UpperCamelCase : Tuple = True
print(F'''Building TensorFlow model from configuration: {config}''' )
_UpperCamelCase : str = model_class(lowercase_ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
_UpperCamelCase : Optional[int] = cached_file(
lowercase_ ,lowercase_ ,force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
_UpperCamelCase : Dict = load_pytorch_checkpoint_in_tfa_model(lowercase_ ,lowercase_ )
if compare_with_pt_model:
_UpperCamelCase : List[str] = tf_model(tf_model.dummy_inputs ,training=lowercase_ ) # build the network
_UpperCamelCase : List[Any] = torch.load(lowercase_ ,map_location="cpu" )
_UpperCamelCase : List[str] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=lowercase_ ,config=lowercase_ ,state_dict=lowercase_ )
with torch.no_grad():
_UpperCamelCase : List[Any] = pt_model(**pt_model.dummy_inputs )
_UpperCamelCase : Dict = pto[0].numpy()
_UpperCamelCase : Optional[Any] = tfo[0].numpy()
_UpperCamelCase : Dict = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(lowercase_ ,save_format="h5" )
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
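    # Example invocation (illustrative only -- the script name and checkpoint/config
    # paths below are placeholders, not files shipped with this module):
    #
    #   python convert_pytorch_checkpoint_to_tf2.py \
    #       --model_type bert \
    #       --pytorch_checkpoint_path ./bert-pytorch_model.bin \
    #       --config_file ./bert-config.json \
    #       --tf_dump_path ./tf_dump \
    #       --compare_with_pt_model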
| 624
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
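# Note on the wrapper above (explanatory sketch, not part of the original test file):
# LambdaLR.state_dict() drops plain function objects from ``lr_lambdas`` but serializes
# the __dict__ of callable objects. Wrapping each lr_lambda therefore forces the
# underlying callable through torch.save, which fails if a schedule is built from
# unpicklable closures instead of module-level functions or partials:
#
#   scheduler = get_linear_schedule_with_warmup(optimizer, 2, 10)
#   LambdaScheduleWrapper.wrap_scheduler(scheduler)
#   torch.save(scheduler.state_dict(), "schedule.bin")  # exercises picklability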
| 624
| 1
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 407
|
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
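# A closed-form sketch (an alternative, not part of the original solution): both sums
# have well-known formulas, sum = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6, so the
# same answer can be computed in O(1):
#
#   def solution_closed_form(n: int = 100) -> int:
#       square_of_sum = (n * (n + 1) // 2) ** 2
#       sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
#       return square_of_sum - sum_of_squares  # 25164150 for n = 100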
| 407
| 1
|
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; record even subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
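# Why ``len(cuts) - 1``: dfs records every vertex whose subtree has an even number of
# nodes, including the root (the whole 10-node tree). Each non-root entry marks an edge
# that can be removed to split off an even component, so the root itself is discounted.
# For the sample edges above:
#
#   # cuts == [3, 6, 1] (order depends on traversal), so len(cuts) - 1 == 2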
| 101
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 636
| 0
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
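# Usage sketch (not part of the original file): ``attribute_map`` makes the generic
# Transformers attribute names resolve to this config's GPT-style fields:
#
#   config = TrajectoryTransformerConfig()
#   assert config.hidden_size == config.n_embd == 128
#   assert config.num_hidden_layers == config.n_layer == 4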
| 720
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and map for the keys; the cache capacity is set to n."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key ``x``: evict the least recently used key if the store is full,
        then move ``x`` to the front to reflect the recent access."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Prints all the elements in the store."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
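# Design note (a sketch, not part of the original module): ``deque.remove`` is O(n), so
# every repeated ``refer`` pays a linear scan. An ``OrderedDict`` gives O(1) hits:
#
#   from collections import OrderedDict
#
#   class LRUCacheOD:
#       def __init__(self, capacity: int) -> None:
#           self.capacity = capacity
#           self.store: OrderedDict = OrderedDict()
#
#       def refer(self, key) -> None:
#           if key in self.store:
#               self.store.move_to_end(key)  # mark as most recently used
#           elif len(self.store) == self.capacity:
#               self.store.popitem(last=False)  # evict the least recently used key
#           self.store[key] = True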
| 273
| 0
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 82
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Solve the fractional knapsack problem for values ``vl``, weights ``wt``,
    capacity ``w`` and item count ``n``."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
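# Worked example (illustrative; the numbers are the classic textbook instance):
# values [60, 100, 120], weights [10, 20, 30], capacity 50. Sorted by value/weight,
# the first two items fit whole (value 160) and 20/30 of the third adds 80:
#
#   assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0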
| 82
| 1
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
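# Padding sketch (not from the original file): ``pad`` always rounds up past the next
# multiple of ``size`` -- (old // size + 1) * size -- so a 21x21 input with size=8 is
# padded to 24x24, and a 16x16 input (already a multiple of 8) still grows to 24x24.
#
#   import numpy as np
#   processor = Swin2SRImageProcessor(do_rescale=False)
#   out = processor.pad(np.zeros((3, 21, 21)), size=8)
#   print(out.shape)  # expected: (3, 24, 24)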
| 700
|
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array of edge weights
    :param v: number of vertices
    :return dist: shortest distance between all vertex pairs
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
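# Non-interactive usage sketch (illustrative; bypasses the input() prompts above):
#
#   INF = float("inf")
#   graph = [
#       [0.0, 2.0, INF],
#       [1.0, 0.0, INF],
#       [INF, INF, 0.0],
#   ]
#   dist, _ = floyd_warshall(graph, 3)  # dist[0][1] == 2.0, dist[1][0] == 1.0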
| 360
| 0
|
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 568
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions"""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
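    # Example invocation (illustrative; any image-classification dataset on the Hub works,
    # and --remove_unused_columns False keeps the raw "image" column for the transforms):
    #
    #   python run_image_classification.py \
    #       --dataset_name beans \
    #       --output_dir ./beans_outputs \
    #       --remove_unused_columns False \
    #       --do_train \
    #       --do_eval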
| 568
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from two given electrical
    properties and return the name/value pair of the zero value in a Python dict.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
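# Worked example (illustrative): the reactance of a 35 mH inductor at 50 Hz is
# X_L = 2 * pi * f * L = 2 * pi * 50 * 0.035 ~= 10.996 ohms:
#
#   print(ind_reactance(35e-3, 50, 0))  # {'reactance': 10.995574287564276}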
| 715
|
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_missing(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    '''Compute the MinHash of a code snippet, or None if it is too short.'''
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    '''Tokenize a code snippet on non-alphanumeric characters.'''
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        '''Add a key to the index and route it into a duplicate cluster if close matches exist.'''
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    '''Exact Jaccard similarity of the token sets of two code snippets.'''
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
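# Hedged sketch of the datasketch primitives the pipeline above builds on:
# hash token sets with MinHash and query an LSH index at a Jaccard threshold.
# The strings and the 0.5 threshold are illustrative; MinHash is probabilistic,
# so near-duplicates are *likely*, not guaranteed, to be returned.
from datasketch import MinHash, MinHashLSH

def _demo_min_hash(text, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for token in set(text.split()):
        m.update(token.encode())
    return m

_lsh = MinHashLSH(threshold=0.5, num_perm=256)
_lsh.insert("doc-a", _demo_min_hash("the quick brown fox jumps over the lazy dog"))
print(_lsh.query(_demo_min_hash("the quick brown fox jumped over the lazy dog")))  # likely ['doc-a']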
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests ( OnnxPipelineTesterMixin , unittest.TestCase ):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs ( self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**a__ ).images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**a__ ).images
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[str] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
@property
def __A ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __A ( self ):
_lowerCAmelCase : str = ort.SessionOptions()
_lowerCAmelCase : Tuple = False
return options
def __A ( self ):
_lowerCAmelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase : Any = init_image.resize((128, 128) )
# using the PNDM scheduler by default
_lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : Dict = pipe(
prompt=a__ , image=a__ , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : List[str] = output.images
_lowerCAmelCase : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __A ( self ):
_lowerCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase : Union[str, Any] = init_image.resize((128, 128) )
_lowerCAmelCase : int = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
_lowerCAmelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase : List[Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(
prompt=a__ , image=a__ , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : Optional[Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase_ : Optional[Any] = """Muhammad Umer Farooq"""
lowerCAmelCase_ : str = """MIT"""
lowerCAmelCase_ : Optional[Any] = """1.0.0"""
lowerCAmelCase_ : Union[str, Any] = """Muhammad Umer Farooq"""
lowerCAmelCase_ : Any = """contact@muhammadumerfarooq.me"""
lowerCAmelCase_ : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def __A ( UpperCAmelCase ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(UpperCAmelCase ).split("." )[-2:] )
def __A ( UpperCAmelCase ) -> str:
'''simple docstring'''
return parse.urlparse(UpperCAmelCase ).netloc
def __A ( UpperCAmelCase = "https://github.com" ) -> list[str]:
'''simple docstring'''
_UpperCamelCase : int = get_domain_name(UpperCAmelCase )
# Initialize the parser
_UpperCamelCase : Any = Parser(UpperCAmelCase )
try:
# Open URL
_UpperCamelCase : Union[str, Any] = requests.get(UpperCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_UpperCamelCase : int = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_UpperCamelCase : Dict = requests.get(UpperCAmelCase )
# Get the valid email.
_UpperCamelCase : List[str] = re.findall("[a-zA-Z0-9]+@" + domain ,read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(UpperCAmelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = emails_from_url("""https://github.com""")
print(f"""{len(emails)} emails found:""")
print("""\n""".join(sorted(emails)))
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        '''Return a match if a file contains an open(...) call without an explicit encoding or binary mode.'''
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match
    def _no_print_statements(self, filepath: str):
        '''Return a match if a file contains a print(...) call outside comments and docstrings.'''
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")
    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
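# Hedged demo of what the first lint regex above flags: an open(...) call with
# no mode/encoding keyword matches, while one passing encoding="utf-8" does not.
_check = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
print(bool(_check.search('with open("data.txt") as f:')))                    # True: flagged
print(bool(_check.search('with open("data.txt", encoding="utf-8") as f:')))  # False: clean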
"""simple docstring"""
def A_ ( snake_case__ ) -> bool:
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(snake_case__ ) == 0:
raise ValueError('''Input list must be a non empty list''' )
if len(snake_case__ ) == 1:
return True
_UpperCamelCase :Optional[Any] = series[1] - series[0]
for index in range(len(snake_case__ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def A_ ( snake_case__ ) -> float:
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(snake_case__ ) == 0:
raise ValueError('''Input list must be a non empty list''' )
_UpperCamelCase :Union[str, Any] = 0
for val in series:
answer += val
return answer / len(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
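# Quick usage check for the helpers above: [2, 4, 6] has constant common
# difference 2 and mean 4.0, while [2, 4, 7] breaks the pattern.
assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0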
"""simple docstring"""
def A_ ( snake_case__ , snake_case__ = " " ) -> list:
_UpperCamelCase :List[str] = []
_UpperCamelCase :int = 0
for index, char in enumerate(snake_case__ ):
if char == separator:
split_words.append(string[last_index:index] )
_UpperCamelCase :Dict = index + 1
elif index + 1 == len(snake_case__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
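# Usage check for the helper above; for single-character separators on
# non-empty input it mirrors str.split.
assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
assert split("Hello there") == ["Hello", "there"]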
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( __a , unittest.TestCase ):
a__ :List[str] = DanceDiffusionPipeline
a__ :Union[str, Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
a__ :Tuple = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
a__ :Dict = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
a__ :List[str] = False
a__ :List[str] = False
def A_ (self ) -> Dict:
torch.manual_seed(0 )
UpperCamelCase_ : Union[str, Any] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__UpperCamelCase , use_timestep_embedding=__UpperCamelCase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
UpperCamelCase_ : Optional[Any] = IPNDMScheduler()
UpperCamelCase_ : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def A_ (self , __UpperCamelCase , __UpperCamelCase=0 ) -> Optional[Any]:
if str(__UpperCamelCase ).startswith("""mps""" ):
UpperCamelCase_ : Any = torch.manual_seed(__UpperCamelCase )
else:
UpperCamelCase_ : Optional[int] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def A_ (self ) -> Optional[Any]:
UpperCamelCase_ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ : str = self.get_dummy_components()
UpperCamelCase_ : List[str] = DanceDiffusionPipeline(**__UpperCamelCase )
UpperCamelCase_ : Dict = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ : int = self.get_dummy_inputs(__UpperCamelCase )
UpperCamelCase_ : int = pipe(**__UpperCamelCase )
UpperCamelCase_ : Dict = output.audios
UpperCamelCase_ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase_ : Dict = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def A_ (self ) -> str:
return super().test_save_load_local()
@skip_mps
def A_ (self ) -> Tuple:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def A_ (self ) -> List[str]:
return super().test_save_load_optional_components()
@skip_mps
def A_ (self ) -> Union[str, Any]:
return super().test_attention_slicing_forward_pass()
def A_ (self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def A_ (self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ (self ) -> Optional[int]:
UpperCamelCase_ : Union[str, Any] = torch_device
UpperCamelCase_ : Tuple = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
UpperCamelCase_ : int = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCamelCase_ : Union[str, Any] = pipe(generator=__UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
UpperCamelCase_ : Optional[Any] = output.audios
UpperCamelCase_ : Dict = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_ : List[str] = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def A_ (self ) -> List[str]:
UpperCamelCase_ : List[str] = torch_device
UpperCamelCase_ : List[str] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
UpperCamelCase_ : Union[str, Any] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ : Any = torch.manual_seed(0 )
UpperCamelCase_ : Optional[Any] = pipe(generator=__UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
UpperCamelCase_ : Any = output.audios
UpperCamelCase_ : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_ : int = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
import base64
def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))
def base85_decode(encoded: bytes) -> str:
    return base64.b85decode(encoded).decode("utf-8")
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["text", "image", "audio"]
def lowercase ( SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_12, 5_12) ) )
elif input_type == "audio":
inputs.append(torch.ones(30_00 ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
inputs.append(create_inputs(SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(F'Invalid type requested: {input_type}' )
return inputs
def lowercase ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = []
for output in outputs:
if isinstance(SCREAMING_SNAKE_CASE , (str, AgentText) ):
output_types.append('text' )
elif isinstance(SCREAMING_SNAKE_CASE , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(SCREAMING_SNAKE_CASE , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(F'Invalid output: {output}' )
return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, 'inputs'))
        self.assertTrue(hasattr(self.tool, 'outputs'))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)
    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)
    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, 'description'))
        self.assertTrue(hasattr(self.tool, 'default_checkpoint'))
        self.assertTrue(self.tool.description.startswith('This is a tool that'))
    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))
    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
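# Small demo of the fixture helpers above: create_inputs fabricates a dummy
# value per declared input type, and output_types maps results back to type
# names. Only the dependency-light "text" branch is exercised here.
print(create_inputs(["text"]))        # -> ['Text input']
print(output_types(["some string"]))  # -> ['text']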
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of each child's prefix to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str) -> tuple[str, str, str]:
        '''Compute the common substring of the node prefix and a word.'''
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
def A_( self , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
for word in words:
self.insert(SCREAMING_SNAKE_CASE )
def A_( self , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
if self.prefix == word:
SCREAMING_SNAKE_CASE_ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
SCREAMING_SNAKE_CASE_ = RadixNode(prefix=SCREAMING_SNAKE_CASE , is_leaf=SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ = self.nodes[word[0]]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = incoming_node.match(
SCREAMING_SNAKE_CASE )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
SCREAMING_SNAKE_CASE_ = remaining_prefix
SCREAMING_SNAKE_CASE_ = self.nodes[matching_string[0]]
SCREAMING_SNAKE_CASE_ = RadixNode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = aux_node
if remaining_word == "":
SCREAMING_SNAKE_CASE_ = True
else:
self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE )
def A_( self , SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE )
if not incoming_node:
return False
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = incoming_node.match(
SCREAMING_SNAKE_CASE )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(SCREAMING_SNAKE_CASE )
def A_( self , SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE )
if not incoming_node:
return False
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = incoming_node.match(
SCREAMING_SNAKE_CASE )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(SCREAMING_SNAKE_CASE )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
SCREAMING_SNAKE_CASE_ = list(self.nodes.values() )[0]
SCREAMING_SNAKE_CASE_ = merging_node.is_leaf
self.prefix += merging_node.prefix
SCREAMING_SNAKE_CASE_ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
SCREAMING_SNAKE_CASE_ = False
# If there is 1 edge, we merge it with its child
else:
SCREAMING_SNAKE_CASE_ = list(incoming_node.nodes.values() )[0]
SCREAMING_SNAKE_CASE_ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
SCREAMING_SNAKE_CASE_ = merging_node.nodes
return True
def A_( self , SCREAMING_SNAKE_CASE = 0 ) -> None:
"""simple docstring"""
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def lowercase ( ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'banana bananas bandana band apple all beast'.split()
SCREAMING_SNAKE_CASE_ = RadixNode()
root.insert_many(SCREAMING_SNAKE_CASE )
assert all(root.find(SCREAMING_SNAKE_CASE ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def lowercase ( ) -> None:
'''simple docstring'''
assert test_trie()
def lowercase ( ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = RadixNode()
SCREAMING_SNAKE_CASE_ = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(SCREAMING_SNAKE_CASE )
print('Words:' , SCREAMING_SNAKE_CASE )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
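# Demo of the prefix-splitting primitive that drives Cases 1-4 above: match()
# returns (common part, leftover node prefix, leftover word).
_node = RadixNode("banana")
print(_node.match("bandana"))  # -> ('ban', 'ana', 'dana')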
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case (__lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowerCAmelCase__ :str = StableDiffusionInpaintPipeline
lowerCAmelCase__ :Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowerCAmelCase__ :str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase__ :Optional[int] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ :Any = frozenset([] )
def _a ( self ) -> List[str]:
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=9 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=__A ,)
lowercase__ = PNDMScheduler(skip_prk_steps=__A )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act="gelu" ,projection_dim=512 ,)
lowercase__ = CLIPTextModel(__A )
lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0 ) -> Tuple:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowercase__ = floats_tensor((1, 3, 32, 32) ,rng=random.Random(__A ) ).to(__A )
lowercase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowercase__ = Image.fromarray(np.uinta(__A ) ).convert("RGB" ).resize((64, 64) )
lowercase__ = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(__A ).startswith("mps" ):
lowercase__ = torch.manual_seed(__A )
else:
lowercase__ = torch.Generator(device=__A ).manual_seed(__A )
lowercase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _a ( self ) -> Optional[Any]:
lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInpaintPipeline(**__A )
lowercase__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
lowercase__ = self.get_dummy_inputs(__A )
lowercase__ = sd_pipe(**__A ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case (unittest.TestCase ):
def _a ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Optional[int]:
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowercase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
lowercase__ = "stabilityai/stable-diffusion-2-inpainting"
lowercase__ = StableDiffusionInpaintPipeline.from_pretrained(__A ,safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowercase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
prompt=__A ,image=__A ,mask_image=__A ,generator=__A ,output_type="np" ,)
lowercase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def _a ( self ) -> Optional[Any]:
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowercase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
lowercase__ = "stabilityai/stable-diffusion-2-inpainting"
lowercase__ = StableDiffusionInpaintPipeline.from_pretrained(
__A ,torch_dtype=torch.floataa ,safety_checker=__A ,)
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowercase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
prompt=__A ,image=__A ,mask_image=__A ,generator=__A ,output_type="np" ,)
lowercase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _a ( self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowercase__ = "stabilityai/stable-diffusion-2-inpainting"
lowercase__ = PNDMScheduler.from_pretrained(__A ,subfolder="scheduler" )
lowercase__ = StableDiffusionInpaintPipeline.from_pretrained(
__A ,safety_checker=__A ,scheduler=__A ,torch_dtype=torch.floataa ,)
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
prompt=__A ,image=__A ,mask_image=__A ,generator=__A ,num_inference_steps=2 ,output_type="np" ,)
lowercase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}
    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer.")
    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent.")
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text
    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args, **kwargs,)
    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args, **kwargs,)
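# Hedged usage sketch for the multi-vector tokenizer above: register a
# placeholder that expands to several learned tokens, then encode text that
# mentions it. The checkpoint id and "<cat-toy>" token are illustrative.
tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
# "<cat-toy>" is silently rewritten to "<cat-toy>_0 ... <cat-toy>_3" before encoding.
ids = tokenizer.encode("a photo of <cat-toy>")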
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
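# Note on the pattern above: `_import_structure` only names the public symbols,
# and `_LazyModule` resolves a submodule the first time one of its attributes is
# touched, so `import transformers` stays cheap. Quick smoke check with a real
# public export (config objects need no torch/TF backend):
from transformers import ConvNextConfig
config = ConvNextConfig()  # instantiating the default config triggers no heavy imports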
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
A__ : Union[str, Any] =logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type ( model_name_or_path ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths ( metric_fn , prediction , ground_truths ):
    """simple docstring"""
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def get_scores ( args , preds_path , gold_data_path ):
    """simple docstring"""
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep="\t" , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(f"F1: {fa:.2f}" )
    logger.info(f"EM: {em:.2f}" )
def get_precision_at_k ( args , preds_path , gold_data_path ):
    """simple docstring"""
    k = args.k
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("\t" )[:k] )
        ref_provenance = set(reference.split("\t" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}" )
def evaluate_batch_retrieval ( args , rag_model , questions ):
    """simple docstring"""
    def strip_title(title ):
        if title.startswith("\"" ):
            title = title[1:]
        if title.endswith("\"" ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="pt" , padding=True , truncation=True , )["input_ids"].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e ( args , rag_model , questions ):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="pt" , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("Q: {} - A: {}".format(q , a ) )
        return answers
def get_args ():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=str , help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ) , )
    parser.add_argument(
        "--index_name" , default=None , choices=["exact", "compressed", "legacy"] , type=str , help="RAG model retriever type" , )
    parser.add_argument(
        "--index_path" , default=None , type=str , help="Path to the retrieval index" , )
    parser.add_argument("--n_docs" , default=5 , type=int , help="Number of retrieved docs" )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=str , help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ) , )
    parser.add_argument("--k" , default=1 , type=int , help="k for the precision@k calculation" )
    parser.add_argument(
        "--evaluation_set" , default=None , type=str , required=True , help="Path to a file containing evaluation samples" , )
    parser.add_argument(
        "--gold_data_path" , default=None , type=str , required=True , help="Path to a tab-separated file with gold samples" , )
    parser.add_argument(
        "--gold_data_mode" , default="qa" , type=str , choices=["qa", "ans"] , help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ) , )
    parser.add_argument(
        "--predictions_path" , type=str , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
    parser.add_argument(
        "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
    parser.add_argument(
        "--eval_batch_size" , default=8 , type=int , help="Batch size per GPU/CPU for evaluation." , )
    parser.add_argument(
        "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
    parser.add_argument(
        "--num_beams" , default=4 , type=int , help="Number of beams to be used when generating answers" , )
    parser.add_argument("--min_length" , default=1 , type=int , help="Min length of the generated answers" )
    parser.add_argument("--max_length" , default=50 , type=int , help="Max length of the generated answers" )
    parser.add_argument(
        "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
    parser.add_argument(
        "--print_docs" , action="store_true" , help="If True, prints docs retrieved while generating." , )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    return args
def main ( args ):
    """simple docstring"""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("rag" ):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s" , checkpoints )
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint ) )
        logger.info("  Batch size = %d" , args.eval_batch_size )
        logger.info("  Predictions will be stored under {}".format(args.predictions_path ) )
        if args.model_type.startswith("rag" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("\n".join(answers ) + "\n" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("\n".join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
A__ : Tuple =get_args()
main(args)
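# Example invocation (a sketch; the model name and data paths are placeholders):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --eval_mode e2e \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/test.target \
#       --predictions_path predictions.txt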
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
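# Maps submodule name -> public symbols. Backend-specific entries are appended
# below only when torch / flax are importable, keeping the package import cheap.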
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
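# CLIPSegProcessor pairs a CLIP tokenizer with a ViT image processor; the tests
# below round-trip it through save_pretrained / from_pretrained and check parity
# with the standalone components.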
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list with a single PIL image built from a random uint8 array."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
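# The fast tests build a tiny StableUnCLIP pipeline from randomly initialized
# components; the slow tests load the real "fusing/stable-unclip-2-1-l" weights.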
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # the class embedding is the noise-augmented image embedding concatenated
            # with the noise-level embedding, hence twice the embedder projection dim
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
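# Minimal usage sketch: the attribute_map above aliases common config names.
#   config = GPTBigCodeConfig(n_embd=512, n_layer=6, n_head=8)
#   config.hidden_size  # -> 512, resolved through the "n_embd" alias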
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
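# The tester below builds a tiny NystromformerConfig plus random tensors so each
# task head (masked LM, QA, classification, multiple choice) can be smoke-tested.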
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        # Run with progressively fewer optional inputs; each call should return
        # last hidden states of shape [batch, seq_len, hidden_size].
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Multiple-choice heads expect [batch, num_choices, seq_len] inputs, so tile
        # each tensor across a new choice dimension before the forward pass.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask into an additive bias for the T5 blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
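# Minimal usage sketch (the sizes here are illustrative assumptions, not the
# pipeline's real configuration):
#   encoder = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768,
#                                     dropout_rate=0.1, num_layers=12, num_heads=12,
#                                     d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu")
#   tokens = torch.randint(0, 1536, (1, 2048))
#   encoded, mask = encoder(tokens, (tokens > 0))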
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
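# Typical invocations (print environment info for bug reports):
#   accelerate env
#   accelerate env --config_file path/to/config.yaml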
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        # Outer product of the decoder (query) and encoder (key) padding masks gives a
        # [batch, query_len, key_len] mask; unsqueeze adds a broadcastable head dim.
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
    ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        key_value_states=None,
        attention_mask=None,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        # FiLM (feature-wise linear modulation): predict a (scale, shift) pair from
        # the conditioning embedding and apply x * (1 + scale) + shift.
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
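# Shape sketch, derived from the forward pass above:
#   encodings_and_masks:  pairs of (encoder states [B, L_enc, d_model], mask [B, L_enc])
#   decoder_input_tokens: [B, targets_length, input_dims] continuous spectrogram frames
#   decoder_noise_time:   [B] noise times in [0, 1)
#   returns:              [B, targets_length, input_dims] predicted spectrogram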
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the integer-sided right
    triangles whose sides sum to that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter with the most integer right triangles."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
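# With the default max_perimeter of 1000 this finds 840, the perimeter admitting
# the most integer-sided right triangles (Project Euler problem 39).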
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are a difference of two consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
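# The candidates generated above are exactly the differences of consecutive cubes,
# (n + 1)**3 - n**3 = 3n**2 + 3n + 1 = 7, 19, 37, ..., which is why the step grows
# by 6 * cube_index on every iteration.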
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize so that the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
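# A hypothetical usage sketch for the processor above; the random input image
# and the printed shape are illustrative (defaults give a 224x224 center crop).
if __name__ == "__main__":
    _processor = CLIPImageProcessor()
    _image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    _batch = _processor.preprocess(_image, return_tensors="np")
    print(_batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)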
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None) -> argparse.ArgumentParser:
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args) -> dict:
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)
    info["`Accelerate` configs"] = accelerate_config
    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
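# Assumed CLI usage, via the accelerate entry point (the flag comes from the
# parser defined above):
#   accelerate env
#   accelerate env --config_file path/to/config.yaml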
def odd_even_sort(input_list: list) -> list:
    """Sort the list in place using odd-even (brick) transposition sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
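    # Quick sanity checks with illustrative values:
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert odd_even_sort([]) == []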
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file.

    Accepts as input a path (`str`), a `dict` with "path"/"bytes" keys, an
    `np.ndarray`, or a `PIL.Image.Image`.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes using native compression if possible, otherwise PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dest_dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dest_dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs):
    """Encode a list of objects (paths, arrays or PIL images) into image dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
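# A minimal round-trip sketch for the encode/decode helpers above; it assumes
# Pillow is installed, and the 4x4 RGB array is purely illustrative.
if __name__ == "__main__":
    from PIL import Image as _PILImage

    _array = np.zeros((4, 4, 3), dtype=np.uint8)
    _encoded = encode_np_array(_array)  # -> {"path": None, "bytes": b"..."}
    _decoded = _PILImage.open(BytesIO(_encoded["bytes"]))
    assert np.array_equal(np.array(_decoded), _array)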
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input, then prefix the sign and the '0b' marker."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
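    # Illustrative checks of the converters above:
    assert main("11") == "0b1011"
    assert main("-11") == "-0b1011"
    assert main("0") == "0b0"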
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
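# A minimal sqlite3 round-trip sketch mirroring the tests above; the in-memory
# database and the row values are illustrative.
if __name__ == "__main__":
    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
    con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", [("0", 0, 0.0), ("1", 1, 1.0)])
    rows = list(con.execute("SELECT * FROM dataset"))
    assert rows == [("0", 0, 0.0), ("1", 1, 1.0)]
    con.close()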
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None) -> None:
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
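# A rough sketch of the video input convention the tests above rely on: one
# video is a list of per-frame arrays in (num_channels, height, width) layout.
# The helper below is illustrative and not part of the original test suite.
if __name__ == "__main__":
    def _make_dummy_video(num_frames=10, num_channels=3, height=18, width=18):
        return [np.random.randint(0, 256, (num_channels, height, width), dtype=np.uint8) for _ in range(num_frames)]

    _video = _make_dummy_video()
    assert len(_video) == 10 and _video[0].shape == (3, 18, 18)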
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum all amicable numbers below `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
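    # The classic amicable pair (220, 284) as a quick check of sum_of_divisors:
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220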
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the idealized Casimir equation F = (ℏ c π² A) / (240 d⁴) for the
    one argument that is passed as 0, given the other two."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
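    # Illustrative call: solve for the force from a plate area (m^2) and a
    # separation (m); exactly one of the three arguments must be 0.
    print(casimir_force(force=0, area=4, distance=0.03))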
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads.")

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}")
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
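# Assumed invocation of this conversion script (the script name and paths are
# illustrative):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_checkpoint/ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin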
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
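# Sketch of the call pattern exercised above (requires downloading the
# "ZinengTang/tvlt-base" checkpoint, so it is left as comments):
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
#   # -> keys: audio_values, audio_mask, pixel_values, pixel_mask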
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
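# normalize_box maps pixel boxes into the 0-1000 coordinate system used by
# LayoutLM-style models; the box values below are illustrative.
if __name__ == "__main__":
    _box = [10, 20, 110, 220]  # (left, top, right, bottom) in pixels
    print(normalize_box(_box, 500, 500))  # [20, 40, 220, 440]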
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
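# Rough usage sketch for the blocks above (shapes and channel sizes are
# illustrative; flax modules are initialized lazily via init/apply, and the
# concatenate axis=-1 above implies NHWC layout):
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
#   hidden = jnp.zeros((1, 16, 16, 32))
#   temb = jnp.zeros((1, 128))
#   params = block.init(jax.random.PRNGKey(0), hidden, temb)
#   hidden, skips = block.apply(params, hidden, temb)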
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_DESCRIPTION = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def a__ ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
    def _download_and_prepare( self , dl_manager ) -> Union[str, Any]:
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            checkpoint_name = """bleurt-base-128"""
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute( self , predictions , references ) -> Tuple:
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 712
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
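# Editor's note: a quick illustration of the helper above. `floats_list((2, 3), scale=2.0)`
# returns a nested list of 2 rows with 3 floats each, every value drawn from `rng.random()`
# and therefore lying in [0.0, 2.0).
#
#   sample = floats_list((2, 3), scale=2.0)
#   assert len(sample) == 2 and len(sample[0]) == 3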
class TvltFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=4_4100 , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Optional[int]:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self ) -> Any:
        self.feature_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ) -> List[Any]:
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , """spectrogram_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """feature_size""" ) )
        self.assertTrue(hasattr(feature_extractor , """num_audio_channels""" ) )
        self.assertTrue(hasattr(feature_extractor , """hop_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """chunk_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """sampling_rate""" ) )
    def test_feat_extract_from_and_save_pretrained( self ) -> Optional[int]:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("""mel_filters""" )
        mel_2 = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ) -> int:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("""mel_filters""" )
        mel_2 = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ) -> Optional[Any]:
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=True ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self , num_samples ) -> str:
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration( self ) -> Optional[Any]:
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 54
| 0
|
'''simple docstring'''
def solution( power : int = 1_000 ):
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
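# Worked example (editor's addition): solution(15) returns 26, since 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26. The Project Euler case, solution(1000), sums the digits of 2**1000.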
| 508
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
lowercase__ = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowercase__ = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowercase__ = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowercase__ = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowercase__ = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowercase__ = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowercase__ = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowercase__ = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowercase__ = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowercase__ = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowercase__ = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowercase__ = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowercase__ = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowercase__ = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowercase__ = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('''CompVis'''):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 508
| 1
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> Optional[Any]:
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ) -> str:
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this"""
            ''' function.''' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ) -> Optional[int]:
    return EarlyStopping(
        monitor=f"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
    def on_batch_end(self , trainer , pl_module):
        '''simple docstring'''
        lrs = {f"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self , trainer , pl_module , type_path , save_generations=True):
        '''simple docstring'''
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / '''test_results.txt'''
            generations_file = od / '''test_generations.txt'''
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file , '''a+''') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = '''\n'''.join(metrics['''preds'''])
            generations_file.open('''w+''').write(content)
@rank_zero_only
    def on_train_start(self , trainer , pl_module):
        '''simple docstring'''
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6})
@rank_zero_only
    def on_test_end(self , trainer , pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        return self._write_logs(trainer , pl_module , '''test''')
@rank_zero_only
    def on_validation_end(self , trainer , pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 712
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class JaxFormatter( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
    def __init__(self , features=None , device=None , **jnp_array_kwargs):
        '''simple docstring'''
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device):
            raise ValueError(
                f"""Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` """
                '''is not serializable with either `pickle` or `dill`. Instead you can surround '''
                '''the device with `str()` to get its string identifier that will be internally mapped '''
                '''to the actual `jaxlib.xla_extension.Device`.''')
        self.device = device if isinstance(device , str) else str(jax.devices()[0])
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable with
        # `pickle` or `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"""Device with string identifier {self.device} not listed among the available """
                f"""devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default """
                f"""device: {str(jax.devices()[0])}.""")
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        '''simple docstring'''
        import jax
        return {str(device): device for device in jax.devices()}
    def _consolidate(self , column):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(column , list) and column:
            if all(
                isinstance(x , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column , axis=0)
        return column
    def _tensorize(self , value):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None))):
            return value
        elif isinstance(value , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'''dtype''': jnp.int64}
            else:
                default_dtype = {'''dtype''': jnp.int32}
        elif isinstance(value , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
            default_dtype = {'''dtype''': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image):
                value = np.asarray(value)
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable with
        # `pickle` or `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self , data_struct):
        '''simple docstring'''
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct , '''__array__''') and not isinstance(data_struct , jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct , (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self , data_struct):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False)
    def format_row(self , pa_table):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)
    def format_column(self , pa_table):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column
    def format_batch(self , pa_table):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
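# Editor's note: in `datasets`, this formatter backs `Dataset.with_format("jax")`; rows then
# come back as `jax.Array` values using the default dtypes chosen in `_tensorize` above
# (int32/float32 unless jax's x64 mode is enabled). A minimal sketch:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   row = ds[0]  # {"x": <jax.Array of dtype float32>}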
| 142
| 0
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph :
"""simple docstring"""
def __init__( self : str ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = {}
    def add_pair( self , u , v , w=1 ) -> Optional[Any]:
        """simple docstring"""
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
    def all_nodes( self ) -> Any:
        """simple docstring"""
        return list(self.graph )
    def remove_pair( self , u , v ) -> List[str]:
        """simple docstring"""
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs( self , s=-2 , d=-1 ) -> List[str]:
        """simple docstring"""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(node[1] )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph_randomly( self , c=-1 ) -> Union[str, Any]:
        """simple docstring"""
        if c == -1:
            c = floor(random() * 1_0_0_0_0 ) + 1_0
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_0_2 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs( self , s=-2 ) -> int:
        """simple docstring"""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree( self , u ) -> List[str]:
        """simple docstring"""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree( self , u ) -> Dict:
        """simple docstring"""
        return len(self.graph[u] )
    def topological_sort( self : Any , lowerCAmelCase__ : int=-2 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : str = []
if s == -2:
_UpperCAmelCase : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = s
_UpperCAmelCase : str = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
    def cycle_nodes( self : str ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : int = []
_UpperCAmelCase : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = -2
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Any = s
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Union[str, Any] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : List[Any] = False
indirect_parents.append(lowerCAmelCase__ )
_UpperCAmelCase : str = s
_UpperCAmelCase : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
    def has_cycle( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Any = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = -2
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Tuple = s
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : Dict = False
indirect_parents.append(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = s
_UpperCAmelCase : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
    def dfs_time( self , s=-2 , e=-1 ) -> List[str]:
        """simple docstring"""
        begin = time()
        self.dfs(s , e )
        end = time()
        return end - begin
    def bfs_time( self , s=-2 ) -> int:
        """simple docstring"""
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
class Graph :
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = {}
    def add_pair( self , u , v , w=1 ) -> List[str]:
        """simple docstring"""
        if self.graph.get(u ):
            # if there already is an edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v ):
            # if there already is an edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair( self , u , v ) -> Optional[int]:
        """simple docstring"""
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
        # the other way round
        if self.graph.get(v ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_ )
    def dfs( self , s=-2 , d=-1 ) -> int:
        """simple docstring"""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(node[1] )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph_randomly( self , c=-1 ) -> Optional[int]:
        """simple docstring"""
        if c == -1:
            c = floor(random() * 1_0_0_0_0 ) + 1_0
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_0_2 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs( self , s=-2 ) -> Any:
        """simple docstring"""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def degree( self , u ) -> Optional[Any]:
        """simple docstring"""
        return len(self.graph[u] )
    def cycle_nodes( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = -2
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Tuple = s
_UpperCAmelCase : int = False
_UpperCAmelCase : Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Any = True
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : int = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
_UpperCAmelCase : Dict = s
_UpperCAmelCase : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
    def has_cycle( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Dict = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = -2
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Union[str, Any] = s
_UpperCAmelCase : str = False
_UpperCAmelCase : Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : List[Any] = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : int = True
if len(lowerCAmelCase__ ) != 0:
_UpperCAmelCase : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
_UpperCAmelCase : int = False
indirect_parents.append(lowerCAmelCase__ )
_UpperCAmelCase : str = s
_UpperCAmelCase : int = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
    def all_nodes( self ) -> Union[str, Any]:
        """simple docstring"""
        return list(self.graph )
    def dfs_time( self , s=-2 , e=-1 ) -> Tuple:
        """simple docstring"""
        begin = time()
        self.dfs(s , e )
        end = time()
        return end - begin
    def bfs_time( self , s=-2 ) -> List[Any]:
        """simple docstring"""
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
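# Small demonstration of the two graph classes above (editor's addition). A directed triangle
# is built and traversed; dfs(0, 2) stops as soon as the target is reached, while bfs(0)
# visits nodes level by level.
#
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   g.add_pair(2, 0)
#   print(g.dfs(0, 2))  # [0, 1, 2]
#   print(g.bfs(0))     # [0, 1, 2]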
| 494
|
'''simple docstring'''
def binary_and( a: int, b: int ):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ), len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ), b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
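# Worked example (editor's addition): 5 = 0b101 and 3 = 0b011, so the character-wise AND over
# the zero-padded strings yields "001".
#   assert binary_and(5, 3) == "0b001"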
| 494
| 1
|
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests( unittest.TestCase ):
'''simple docstring'''
    @property
    def dummy_uncond_unet( self : Any ) -> Any:
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') ,up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') ,)
        return model
    def test_inference( self : Any ) -> List[Any]:
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet ,scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator ,num_inference_steps=20 ,output_type='''numpy''' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator ,num_inference_steps=20 ,output_type='''numpy''' ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def test_inference_cifar10( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        model_id = """google/ddpm-cifar10-32"""
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet ,scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator ,output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 708
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config( model_name ) -> Any:
    backbone_config = SwinConfig(
        embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
# set labels
    repo_id = '''huggingface/label-files'''
    if "o365" in model_name:
        num_labels = 3_66
        filename = '''object365-id2label.json'''
    else:
        num_labels = 91
        filename = '''coco-detection-id2label.json'''
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys( config ) -> Any:
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key( dct , old , new ) -> Optional[Any]:
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ) -> List[str]:
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim :, :]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ) -> Union[str, Any]:
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""model.decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""model.decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
def prepare_img( ) -> List[Any]:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ) -> Dict:
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
    else:
        raise ValueError(f"""Model name {model_name} not supported""" )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
        print(name , param.shape )
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('''transformer''' , '''model''' )] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict['''model.''' + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('''transformer''' , '''model''' )] = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format='''coco_detection''' )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values.to(device ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
lowercase__ : Any = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
lowercase__ : Any = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
lowercase__ : Dict = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
lowercase__ : int = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowerCamelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowerCamelCase ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f"""jozhang97/{model_name}""" )
processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
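# Example invocation (a sketch; the dump path below is hypothetical):
#   python convert_deta_checkpoint.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub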
| 122
| 0
|
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products of the prime partitions of ``number_to_partition``."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret
def solution(number_unique_partitions: int = 5_0_0_0) -> int | None:
    """Return the first value writable as a sum of primes in more than ``number_unique_partitions`` ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
    print(F'''{solution() = }''')
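# Worked example: the prime partitions of 7 are 7, 5+2 and 3+2+2, which map to
# the products 7, 10 and 12, so partition(7) == {7, 10, 12} and 7 can be written
# as a sum of primes in len(partition(7)) == 3 different ways.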
| 517
|
'''simple docstring'''
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """Return True if every element of ``collection`` is distinct."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
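# Quick examples:
#   all_unique([1, 2, 3])  -> True
#   all_unique([1, 2, 2])  -> False (2 appears twice)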
| 517
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , apply_ocr=True , ):
        self.size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''apply_ocr''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_layoutlmv3_integration_test( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        image = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        encoding = image_processing(image , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
_A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
_A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 
6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , expected_words )
        self.assertListEqual(encoding.boxes , expected_boxes )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
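# Minimal usage sketch of the processor under test (inputs hypothetical):
#   image_processor = LayoutLMvaImageProcessor(apply_ocr=True)
#   encoding = image_processor(image, return_tensors="pt")
#   # encoding.pixel_values is resized to 224x224; encoding.words / encoding.boxes
#   # hold the Tesseract OCR output when apply_ocr=True.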
| 712
|
def solution( limit = 1_00_00_00 ):
    '''Return the start number below ``limit`` that produces the longest Collatz chain.'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2 , limit ):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
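# Worked example: the chain for 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
# i.e. 10 terms, so counters[13] == 10 once the value has been memoised.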
| 62
| 0
|
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest ( nn.Module ):
    '''A tiny model used to exercise the offload utilities.'''
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3, 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4, 5 )
    def forward( self, x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class OffloadTester ( unittest.TestCase ):
    '''simple docstring'''
    def test_offload_state_dict( self ):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict() )
            index_file = os.path.join(tmp_dir, 'index.json' )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, F"{key}.dat" )
                self.assertTrue(os.path.isfile(weight_file ) )
            # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight( self ):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, 'weight', tmp_dir, {} )
                weight_file = os.path.join(tmp_dir, 'weight.dat' )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index, {'weight': {'shape': [2, 3], 'dtype': str(dtype ).split('.' )[1]}} )
                new_weight = load_offloaded_weight(weight_file, index['weight'] )
                self.assertTrue(torch.equal(weight, new_weight ) )
    def test_offload_weights_loader( self ):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        disk_part = {k: v for k, v in state_dict.items() if 'linear2' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ), sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if 'weight' in k}
        disk_part = {k: v for k, v in state_dict.items() if 'weight' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ), sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ), sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key] ) )
    def test_extract_submodules_state_dict( self ):
        state_dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'] )
        self.assertDictEqual(extracted, {'a.1': 0, 'a.2': 2} )
        state_dict = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'] )
        self.assertDictEqual(extracted, {'a.1.a': 0, 'a.2.a': 2} )
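# On-disk layout produced by offload_state_dict (a sketch): the folder holds one
# raw-memmap file per tensor plus an index, e.g.
#   index.json         -> {"linear1.weight": {"shape": [4, 3], "dtype": "float32"}, ...}
#   linear1.weight.dat, linear1.bias.dat, ...
# load_offloaded_weight then memory-maps a .dat file back using the shape/dtype
# recorded in the index.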
| 28
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__( self , model=None , **kwargs ):
        logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
        self.model = model
        self.model_save_dir = kwargs.get('''model_save_dir''' , None )
        self.latest_model_name = kwargs.get('''latest_model_name''' , ONNX_WEIGHTS_NAME )
    def __call__( self , **kwargs ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
    @staticmethod
    def load_model( path: Union[str, Path] , provider=None , sess_options=None ):
        '''Load an ONNX inference session with a given provider.'''
        if provider is None:
            logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
            provider = '''CPUExecutionProvider'''
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self , save_directory: Union[str, Path] , file_name: Optional[str] = None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory: Union[str, os.PathLike] , **kwargs ):
        if os.path.isfile(save_directory ):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
    @classmethod
    def _from_pretrained( cls , model_id: Union[str, Path] , use_auth_token: Optional[Union[bool, str, None]] = None , revision: Optional[Union[str, None]] = None , force_download: bool = False , cache_dir: Optional[str] = None , file_name: Optional[str] = None , provider: Optional[str] = None , sess_options: Optional["ort.SessionOptions"] = None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs['''model_save_dir'''] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs['''model_save_dir'''] = Path(model_cache_path ).parent
            kwargs['''latest_model_name'''] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
    @classmethod
    def from_pretrained( cls , model_id: Union[str, Path] , force_download: bool = True , use_auth_token: Optional[str] = None , cache_dir: Optional[str] = None , **model_kwargs ):
        revision = None
        if len(str(model_id ).split('''@''' ) ) == 2:
            model_id, revision = str(model_id ).split('''@''' )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
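# Usage sketch (repo id and input names below are hypothetical):
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", file_name="model.onnx")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
# from_pretrained resolves "repo@revision" ids, downloads the .onnx file from the
# Hub when the path is not a local directory, and wraps it in an ort.InferenceSession.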
| 248
| 0
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input( self ):
        return self.get_dummy_input()
    @property
    def output_shape( self ):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
    def get_dummy_input( self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False, ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device )
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device )
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1 )
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device ),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device )
        return dummy_input
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels" )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output( self, expected_slice ):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output, tuple ):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3 )
    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps" )
    def test_training( self ):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output, tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape, device=device )
        loss = torch.nn.functional.mse_loss(output, noise )
        loss.backward()
| 709
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer ( BaseTokenizer ):
    def __init__( self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>", ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}" ), " " ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters )
    def train( self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        if isinstance(files, str ):
            files = [files]
        self._tokenizer.train(files, trainer=trainer )
        self.add_unk_id()
    def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self ):
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
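# Usage sketch (the corpus path is hypothetical):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train(files="corpus.txt", vocab_size=8000)
#   tokenizer.save("unigram-tokenizer.json")
# Training fills the Unigram vocabulary, and add_unk_id() patches the serialized
# model so that id 2 (<unk>) is used for unknown pieces.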
| 37
| 0
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor ( FeatureExtractionMixin ):
    """Extracts text nodes and their xpaths from HTML strings for MarkupLM."""
    def __init__( self , **kwargs ):
        requires_backends(self , ['''bs4'''] )
        super().__init__(**kwargs )
    def xpath_soup( self , element ):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single( self , html_string ):
        html_code = BeautifulSoup(html_string , '''html.parser''' )
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags, xpath_subscripts = self.xpath_soup(element )
                string2xtag_seq.append(xpath_tags )
                string2xsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(string2xtag_seq ):
            raise ValueError('''Number of doc strings and xtags does not correspond''' )
        if len(all_doc_strings ) != len(string2xsubs_seq ):
            raise ValueError('''Number of doc strings and xsubs does not correspond''' )
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath( self , xpath_tags , xpath_subscripts ):
        xpath = ""
        for tagname, subs in zip(xpath_tags , xpath_subscripts ):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath
    def __call__( self , html_strings ) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                f"""but is of type {type(html_strings )}.""" )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , string2xtag_seq , string2xsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
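# Usage sketch:
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   encoding.nodes  -> [["Hello world"]]
#   encoding.xpaths -> [["/html/body/p"]]
# Each text node is paired with the xpath reconstructed from its tag ancestry.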
| 484
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "esm"
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_26 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """simple docstring"""
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """simple docstring"""
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def to_dict( self ):
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """simple docstring"""
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self ):
        return asdict(self )
def get_default_vocab_list():
    '''Return the default ESM-2 vocabulary.'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
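# The default vocabulary above matches the 33 tokens of the ESM-2 alphabet
# (4 special tokens, 25 amino-acid/ambiguity codes, ".", "-", "<null_1>" and
# "<mask>"); EsmConfig falls back to it when a folding model is loaded without
# an explicit vocab_list.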
| 484
| 1
|
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline ( DiffusionPipeline ):
    '''Pipeline that denoises note tokens into mel spectrogram segments and vocodes them with MelGAN.'''
    _optional_components = ["melgan"]
    def __init__( self , notes_encoder: SpectrogramNotesEncoder , continuous_encoder: SpectrogramContEncoder , decoder: T5FilmDecoder , scheduler: DDPMScheduler , melgan: OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1E-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
        '''Linearly scale features to the network output range.'''
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        '''Invert by linearly scaling network outputs back to the features range.'''
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask ):
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time ):
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
    @torch.no_grad()
    def __call__( self , input_tokens: List[List[int]] , generator: Optional[torch.Generator] = None , num_inference_steps: int = 100 , return_dict: bool = True , output_type: str = "numpy" , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps )}.''' )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )
            logger.info("Generated segment" , i )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
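# Usage sketch (the checkpoint id is hypothetical):
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("some-org/music-spectrogram-diffusion")
#   output = pipe(input_tokens, num_inference_steps=100, output_type="numpy")
#   audio = output.audios[0]
# Each note-token chunk is denoised into a 128-bin mel segment, conditioned on the
# previous segment, and the concatenated spectrogram is vocoded by MelGAN.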
| 709
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 595
| 0
|
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
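# Quick examples:
#   is_arithmetic_series([2, 4, 6]) -> True  (common difference 2)
#   is_arithmetic_series([2, 4, 7]) -> False
#   mean([2, 4, 6])                 -> 4.0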
| 338
|
class MaxFenwickTree:
    def __init__( self, size ):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index ):
        return index | (index + 1)
    @staticmethod
    def get_prev( index ):
        return (index & (index + 1)) - 1
    def update( self, index, value ):
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute the maximum over the segment [current_left_border, index]
                self.tree[index] = max(value, self.query(current_left_border, index ) )
            index = self.get_next(index )
    def query( self, left, right ):
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result, self.tree[right] )
                right = current_left
            else:
                result = max(result, self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
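# Usage sketch:
#   tree = MaxFenwickTree(5)
#   tree.update(2, 7)
#   tree.update(4, 3)
#   tree.query(0, 5)  # -> 7, the maximum on the half-open range [0, 5)
# Both update and query run in O(log n) thanks to the get_prev/get_next jumps.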
| 190
| 0
|
'''simple docstring'''
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = """"""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
    decoded = """"""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
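# Worked example with the table above:
#   encode("hello") == "AABBBAABAAABABAABABAABBAB"
# (h -> AABBB, e -> AABAA, l -> ABABA, o -> ABBAB); decode() then splits each
# coded word back into 5-character groups.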
| 700
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 631
| 0
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys( state_dict ):
    model_state_dict = {}
    state_dict.pop('pixel_mean' , None)
    state_dict.pop('pixel_std' , None)
    output_hypernetworks_mlps_pattern = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key)
        if re.match(output_hypernetworks_mlps_pattern , key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key).group(2))
            if layer_nb == 0:
                key = key.replace('layers.0' , 'proj_in')
            elif layer_nb == 1:
                key = key.replace('layers.1' , 'layers.0')
            elif layer_nb == 2:
                key = key.replace('layers.2' , 'proj_out')
        model_state_dict[key] = value
    model_state_dict['shared_image_embedding.positional_embedding'] = model_state_dict[
        'prompt_encoder.shared_embedding.positional_embedding'
    ]
    return model_state_dict
def convert_sam_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub , model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id , f'''checkpoints/{model_name}.pth''')
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        config = SamConfig(
            vision_config=vision_config , )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        config = SamConfig(
            vision_config=vision_config , )
    state_dict = torch.load(checkpoint_path , map_location='cpu')
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to('cuda')
    img_url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
    raw_image = Image.open(requests.get(img_url , stream=True).raw).convert('RGB')
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image) , return_tensors='pt').to('cuda')
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_890_251_159_668
        inputs = processor(
            images=np.array(raw_image) , input_points=input_points , input_labels=input_labels , return_tensors='pt').to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_712_603_092_193_604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image) , input_boxes=input_boxes , return_tensors='pt').to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8_686_015_605_926_514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image) , input_points=input_points , input_labels=input_labels , return_tensors='pt').to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting.",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repository that hosts the original SAM checkpoints.",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
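# Example invocation (illustrative; the script filename is an assumption, and a CUDA
# device is required by the checks above):
#   python convert_sam_original_to_hf_format.py --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-b --model_hub_id ybelkada/segment-anything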
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Create a list of PIL images from random uint8 arrays in (channels, height, width) layout.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
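# These tests can be run with pytest, e.g. (the test file path is illustrative):
#   python -m pytest tests/models/blip_2/test_processor_blip_2.py -q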
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # Computed from `chunk_length_s`; it cannot be set directly.
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # Computed from `chunk_length_s` and `overlap`; it cannot be set directly.
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
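    # Worked example (illustrative): with sampling_rate=24000, chunk_length_s=1.0 and
    # overlap=0.25, chunk_length = int(1.0 * 24000) = 24000 samples and
    # chunk_stride = max(1, int((1.0 - 0.25) * 24000)) = 18000 samples.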
    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
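# Minimal usage sketch (illustrative; the checkpoint id is an assumption):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "remote", "plane"])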
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
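# Minimal usage sketch (illustrative file names):
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files={"train": "train.jsonl"})        # one JSON object per line
#   ds = load_dataset("json", data_files="dump.json", field="data")       # records nested under a field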
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = 3 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1 , __lowerCAmelCase = "relu" , **__lowerCAmelCase , ) -> Union[str, Any]:
super().__init__(**a_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase__ : Tuple = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase__ : Optional[Any] = tf.keras.layers.ConvaD(
filters=a_ , kernel_size=a_ , strides=a_ , padding='''VALID''' , groups=a_ , use_bias=a_ , name='''convolution''' , )
lowercase__ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
lowercase__ : int = ACTaFN[activation] if activation is not None else tf.identity
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[int]:
lowercase__ : Union[str, Any] = self.convolution(self.padding(a_ ) )
lowercase__ : Optional[Any] = self.normalization(a_ )
lowercase__ : Dict = self.activation(a_ )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
def __init__( self , __lowerCAmelCase , **__lowerCAmelCase ) -> str:
super().__init__(**a_ )
lowercase__ : Optional[int] = config.num_channels
lowercase__ : List[Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int:
lowercase__ : str = shape_list(a_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase__ : int = tf.transpose(a_ , perm=(0, 2, 3, 1) )
lowercase__ : Optional[int] = self.embedder(a_ )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = 2 , **__lowerCAmelCase ) -> int:
super().__init__(**a_ )
lowercase__ : Optional[int] = tf.keras.layers.ConvaD(
filters=a_ , kernel_size=1 , strides=a_ , use_bias=a_ , name='''convolution''' )
lowercase__ : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = False ) -> tf.Tensor:
return self.normalization(self.convolution(a_ ) , training=a_ )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
super().__init__(**a_ )
lowercase__ : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a_ , name='''pooler''' )
lowercase__ : Dict = [
tf.keras.layers.ConvaD(filters=a_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=a_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Tuple:
lowercase__ : Union[str, Any] = self.pooler(a_ )
for layer_module in self.attention:
lowercase__ : Optional[Any] = layer_module(a_ )
lowercase__ : Dict = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , **__lowerCAmelCase ) -> str:
super().__init__(**a_ )
lowercase__ : List[str] = in_channels != out_channels or stride != 1
lowercase__ : int = max(1 , out_channels // config.groups_width )
lowercase__ : Optional[int] = (
TFRegNetShortCut(a_ , stride=a_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase__ : List[str] = [
TFRegNetConvLayer(a_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
a_ , stride=a_ , groups=a_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(a_ , kernel_size=1 , activation=a_ , name='''layer.2''' ),
]
lowercase__ : Optional[int] = ACTaFN[config.hidden_act]
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Any:
lowercase__ : Any = hidden_state
for layer_module in self.layers:
lowercase__ : Dict = layer_module(a_ )
lowercase__ : Dict = self.shortcut(a_ )
hidden_state += residual
lowercase__ : List[Any] = self.activation(a_ )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , **__lowerCAmelCase ) -> int:
super().__init__(**a_ )
lowercase__ : Tuple = in_channels != out_channels or stride != 1
lowercase__ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase__ : Any = (
TFRegNetShortCut(a_ , stride=a_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase__ : Union[str, Any] = [
TFRegNetConvLayer(a_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
a_ , stride=a_ , groups=a_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(a_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(a_ , kernel_size=1 , activation=a_ , name='''layer.3''' ),
]
lowercase__ : Optional[int] = ACTaFN[config.hidden_act]
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Tuple:
lowercase__ : Optional[Any] = hidden_state
for layer_module in self.layers:
lowercase__ : Dict = layer_module(a_ )
lowercase__ : Optional[Any] = self.shortcut(a_ )
hidden_state += residual
lowercase__ : str = self.activation(a_ )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 2 , __lowerCAmelCase = 2 , **__lowerCAmelCase ) -> List[str]:
super().__init__(**a_ )
lowercase__ : List[str] = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
lowercase__ : List[Any] = [
# downsampling is done in the first layer with stride of 2
layer(a_ , a_ , a_ , stride=a_ , name='''layers.0''' ),
*[layer(a_ , a_ , a_ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]:
for layer_module in self.layers:
lowercase__ : List[str] = layer_module(a_ )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self , __lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]:
super().__init__(**a_ )
lowercase__ : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
a_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase__ : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(a_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(a_ , a_ , a_ , depth=a_ , name=F"""stages.{i+1}""" ) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
lowercase__ : int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : Optional[Any] = hidden_states + (hidden_state,)
lowercase__ : Optional[Any] = stage_module(a_ )
if output_hidden_states:
lowercase__ : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=a_ , hidden_states=a_ )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self , __lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
super().__init__(**a_ )
lowercase__ : List[str] = config
lowercase__ : List[str] = TFRegNetEmbeddings(a_ , name='''embedder''' )
lowercase__ : Optional[Any] = TFRegNetEncoder(a_ , name='''encoder''' )
lowercase__ : Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a_ , name='''pooler''' )
@unpack_inputs
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase__ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : str = self.embedder(a_ , training=a_ )
lowercase__ : Dict = self.encoder(
a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ )
lowercase__ : Tuple = encoder_outputs[0]
lowercase__ : str = self.pooler(a_ )
# Change to NCHW output format have uniformity in the modules
lowercase__ : str = tf.transpose(a_ , perm=(0, 3, 1, 2) )
lowercase__ : Optional[int] = tf.transpose(a_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase__ : Optional[int] = tuple([tf.transpose(a_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a_ , pooler_output=a_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def _lowerCAmelCase( self ) -> Dict:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__a: Tuple = R"""\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"""
__a: Optional[int] = R"""\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
def __init__( self , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
super().__init__(a_ , *a_ , **a_ )
lowercase__ : Optional[Any] = TFRegNetMainLayer(a_ , name='''regnet''' )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Tuple = self.regnet(
pixel_values=a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__( self , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
super().__init__(a_ , *a_ , **a_ )
lowercase__ : Tuple = config.num_labels
lowercase__ : List[str] = TFRegNetMainLayer(a_ , name='''regnet''' )
# classification head
lowercase__ : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
def _lowerCAmelCase( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase__ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Dict = self.regnet(
a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ )
lowercase__ : List[Any] = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Union[str, Any] = self.classifier[0](a_ )
lowercase__ : str = self.classifier[1](a_ )
lowercase__ : List[str] = None if labels is None else self.hf_compute_loss(labels=a_ , logits=a_ )
if not return_dict:
lowercase__ : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=a_ , logits=a_ , hidden_states=outputs.hidden_states )
"""simple docstring"""
def A_ ( lowercase ) -> int:
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def A_ ( lowercase ) -> bool:
"""simple docstring"""
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Union[str, Any] = number
while duplicate > 0:
UpperCAmelCase_ ,UpperCAmelCase_ : Tuple = divmod(lowercase , 10 )
fact_sum += factorial(lowercase )
return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
lowercase_ = int(input("Enter number: ").strip())
print(
f"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
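    # Worked example (illustrative): 145 is a Krishnamurthy number because
    # 1! + 4! + 5! = 1 + 24 + 120 = 145, whereas 20 is not (2! + 0! = 2 + 1 = 3).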
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 0
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : str ) -> YolosConfig:
"""simple docstring"""
_lowerCAmelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_lowerCAmelCase = 192
_lowerCAmelCase = 768
_lowerCAmelCase = 12
_lowerCAmelCase = 3
_lowerCAmelCase = [800, 1333]
_lowerCAmelCase = False
elif yolos_name == "yolos_s_dWr":
_lowerCAmelCase = 330
_lowerCAmelCase = 14
_lowerCAmelCase = 6
_lowerCAmelCase = 1320
elif "yolos_s" in yolos_name:
_lowerCAmelCase = 384
_lowerCAmelCase = 1536
_lowerCAmelCase = 12
_lowerCAmelCase = 6
elif "yolos_b" in yolos_name:
_lowerCAmelCase = [800, 1344]
_lowerCAmelCase = 91
_lowerCAmelCase = """huggingface/label-files"""
_lowerCAmelCase = """coco-detection-id2label.json"""
_lowerCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( snake_case_ : dict , snake_case_ : YolosConfig , snake_case_ : bool = False ) -> List[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase = in_proj_weight[: config.hidden_size, :]
_lowerCAmelCase = in_proj_bias[: config.hidden_size]
_lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase = in_proj_weight[-config.hidden_size :, :]
_lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( snake_case_ : str ) -> str:
"""simple docstring"""
if "backbone" in name:
_lowerCAmelCase = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
_lowerCAmelCase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
_lowerCAmelCase = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
_lowerCAmelCase = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
_lowerCAmelCase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_lowerCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
_lowerCAmelCase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
_lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
_lowerCAmelCase = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
_lowerCAmelCase = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
_lowerCAmelCase = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def __UpperCAmelCase ( snake_case_ : dict , snake_case_ : YolosForObjectDetection ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCAmelCase = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
_lowerCAmelCase = key.split(""".""" )
_lowerCAmelCase = int(key_split[2] )
_lowerCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_lowerCAmelCase = val[:dim, :]
_lowerCAmelCase = val[
dim : dim * 2, :
]
_lowerCAmelCase = val[-dim:, :]
else:
_lowerCAmelCase = val[:dim]
_lowerCAmelCase = val[dim : dim * 2]
_lowerCAmelCase = val[-dim:]
else:
_lowerCAmelCase = val
return orig_state_dict
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str , snake_case_ : str , snake_case_ : bool = False ) -> Any:
"""simple docstring"""
_lowerCAmelCase = get_yolos_config(snake_case_ )
# load original state_dict
_lowerCAmelCase = torch.load(snake_case_ , map_location="""cpu""" )["""model"""]
# load 🤗 model
_lowerCAmelCase = YolosForObjectDetection(snake_case_ )
model.eval()
_lowerCAmelCase = convert_state_dict(snake_case_ , snake_case_ )
model.load_state_dict(snake_case_ )
# Check outputs on an image, prepared by YolosImageProcessor
_lowerCAmelCase = 800 if yolos_name != """yolos_ti""" else 512
_lowerCAmelCase = YolosImageProcessor(format="""coco_detection""" , size=snake_case_ )
_lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
_lowerCAmelCase = model(**snake_case_ )
_lowerCAmelCase , _lowerCAmelCase = outputs.logits, outputs.pred_boxes
_lowerCAmelCase , _lowerCAmelCase = None, None
if yolos_name == "yolos_ti":
_lowerCAmelCase = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
_lowerCAmelCase = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
_lowerCAmelCase = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
_lowerCAmelCase = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
_lowerCAmelCase = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
_lowerCAmelCase = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
_lowerCAmelCase = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
_lowerCAmelCase = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
_lowerCAmelCase = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
_lowerCAmelCase = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , snake_case_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , snake_case_ , atol=1e-4 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
_lowerCAmelCase = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
_lowerCAmelCase = model_mapping[yolos_name]
image_processor.push_to_hub(snake_case_ , organization="""hustvl""" )
model.push_to_hub(snake_case_ , organization="""hustvl""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 156
|
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch the JSON record for an Open Library ID such as 'isbn/0140328726'."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
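# Sketch of the output shape (actual values depend on the live Open Library API;
# ISBN 0140328726 is "Matilda" by Roald Dahl):
#     summarize_book(get_openlibrary_data("isbn/0140328726"))
#     -> {"Title": "Matilda", "Authors": "Roald Dahl", ...}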
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('''\n'''.join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.')
| 156
| 1
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers, starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
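# Sanity check (assuming the generator above): the first Fibonacci number with
# three digits is F(12) = 144, so solution(3) -> 12.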
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 720
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of the distinct prime factors of num."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """Return True if every element of the list is identical (or the list is empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        checker = [upf_len(x) for x in group]
        # Append our target number to the end.
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4):
    """Return the first of the n consecutive integers found by run(n), if any."""
    results = run(n)
    return results[0] if len(results) else None
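# Sanity check (assuming the helpers above): 14 = 2 * 7 and 15 = 3 * 5 are the
# first two consecutive integers with two distinct prime factors each, so
# solution(2) -> 14.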
if __name__ == "__main__":
print(solution())
| 226
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
_UpperCamelCase = (720, 1280) # Height, Width
_UpperCamelCase = (0.4, 0.6) # if height or width lower than this scale, drop it.
_UpperCamelCase = 1 / 100
_UpperCamelCase = ""
_UpperCamelCase = ""
_UpperCamelCase = ""
_UpperCamelCase = 250
def _lowercase ( ):
__lowerCAmelCase, __lowerCAmelCase : List[Any] = get_dataset(lowercase__ , lowercase__ )
for index in range(lowercase__ ):
__lowerCAmelCase : List[Any] = random.sample(range(len(lowercase__ ) ) , 4 )
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Union[str, Any] = update_image_and_anno(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , filter_scale=lowercase__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__lowerCAmelCase : Dict = random_chars(3_2 )
__lowerCAmelCase : List[Any] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__lowerCAmelCase : str = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , lowercase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
__lowerCAmelCase : Union[str, Any] = []
for anno in new_annos:
__lowerCAmelCase : List[Any] = anno[3] - anno[1]
__lowerCAmelCase : Tuple = anno[4] - anno[2]
__lowerCAmelCase : Tuple = anno[1] + width / 2
__lowerCAmelCase : str = anno[2] + height / 2
__lowerCAmelCase : str = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(lowercase__ )
with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : int = []
__lowerCAmelCase : int = []
for label_file in glob.glob(os.path.join(lowercase__ , '''*.txt''' ) ):
__lowerCAmelCase : List[str] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(lowercase__ ) as in_file:
__lowerCAmelCase : Optional[Any] = in_file.readlines()
__lowerCAmelCase : List[str] = os.path.join(lowercase__ , f"""{label_name}.jpg""" )
__lowerCAmelCase : List[str] = []
for obj_list in obj_lists:
__lowerCAmelCase : Optional[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
__lowerCAmelCase : str = float(obj[1] ) - float(obj[3] ) / 2
__lowerCAmelCase : Optional[Any] = float(obj[2] ) - float(obj[4] ) / 2
__lowerCAmelCase : List[Any] = float(obj[1] ) + float(obj[3] ) / 2
__lowerCAmelCase : int = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowercase__ )
labels.append(lowercase__ )
return img_paths, labels
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 0.0 , ):
__lowerCAmelCase : int = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__lowerCAmelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__lowerCAmelCase : Optional[int] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__lowerCAmelCase : str = int(scale_x * output_size[1] )
__lowerCAmelCase : int = int(scale_y * output_size[0] )
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Optional[int] = []
for i, index in enumerate(lowercase__ ):
__lowerCAmelCase : Optional[Any] = all_img_list[index]
path_list.append(lowercase__ )
__lowerCAmelCase : Optional[int] = all_annos[index]
__lowerCAmelCase : int = cva.imread(lowercase__ )
if i == 0: # top-left
__lowerCAmelCase : Dict = cva.resize(lowercase__ , (divid_point_x, divid_point_y) )
__lowerCAmelCase : Tuple = img
for bbox in img_annos:
__lowerCAmelCase : Any = bbox[1] * scale_x
__lowerCAmelCase : Tuple = bbox[2] * scale_y
__lowerCAmelCase : List[Any] = bbox[3] * scale_x
__lowerCAmelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__lowerCAmelCase : Tuple = cva.resize(lowercase__ , (output_size[1] - divid_point_x, divid_point_y) )
__lowerCAmelCase : Union[str, Any] = img
for bbox in img_annos:
__lowerCAmelCase : Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
__lowerCAmelCase : Union[str, Any] = bbox[2] * scale_y
__lowerCAmelCase : Union[str, Any] = scale_x + bbox[3] * (1 - scale_x)
__lowerCAmelCase : Dict = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__lowerCAmelCase : Any = cva.resize(lowercase__ , (divid_point_x, output_size[0] - divid_point_y) )
__lowerCAmelCase : Union[str, Any] = img
for bbox in img_annos:
__lowerCAmelCase : Union[str, Any] = bbox[1] * scale_x
__lowerCAmelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
__lowerCAmelCase : int = bbox[3] * scale_x
__lowerCAmelCase : Tuple = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__lowerCAmelCase : List[Any] = cva.resize(
lowercase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__lowerCAmelCase : Tuple = img
for bbox in img_annos:
__lowerCAmelCase : List[str] = scale_x + bbox[1] * (1 - scale_x)
__lowerCAmelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
__lowerCAmelCase : Any = scale_x + bbox[3] * (1 - scale_x)
__lowerCAmelCase : Optional[Any] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
__lowerCAmelCase : str = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _lowercase ( lowercase__ ):
assert number_char > 1, "The number of character should greater than 1"
__lowerCAmelCase : str = ascii_lowercase + digits
return "".join(random.choice(lowercase__ ) for _ in range(lowercase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 492
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move in a perfect binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
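# Worked example for the tree above (8 leaves, height 3): the four max nodes give
# 90, 33, 65 and 34423; the two min nodes give 33 and 65; the root maximiser
# therefore guarantees max(33, 65) = 65.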
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 492
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    """Text-to-speech tool built on SpeechT5 plus the HiFi-GAN vocoder."""

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
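# Hypothetical usage sketch (assuming the class above; the first call downloads
# the SpeechT5 checkpoints from the Hub):
#     tool = TextToSpeechTool()
#     waveform = tool("Hello world")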
| 588
|
import warnings
from functools import wraps
from typing import Callable
def UpperCAmelCase_ ( __UpperCamelCase ):
@wraps(__UpperCamelCase )
def _inner_fn(*__UpperCamelCase, **__UpperCamelCase ):
warnings.warn(
(f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future."""), __UpperCamelCase, )
return fn(*__UpperCamelCase, **__UpperCamelCase )
return _inner_fn
| 588
| 1
|
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""", [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""", ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
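# Note on the parametrization above: the two pytest.mark.parametrize decorators
# combine as a cross product, so this test runs 3 x 4 = 12 times, once per
# (dataset_size, input_in_memory_max_size) pair.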
| 238
|
"""simple docstring"""
from __future__ import annotations
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Return True if n (< 1000001) is prime, using the sieve precomputed above."""
    return seive[n]
def contains_an_even_digit(n: int) -> bool:
    """Return True if any digit of n is even (such n > 2 cannot be a circular prime)."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes below limit: primes whose every digit rotation is prime."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Count the circular primes below one million."""
    return len(find_circular_primes())
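# Worked example of the rotation check above: 197 is a circular prime because
# every digit rotation (197, 971, 719) is itself prime.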
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
| 238
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """Configuration for a CANINE model, mirroring the google/canine-s defaults."""

    model_type = "canine"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4,
        upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384,
        local_transformer_stride=128, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
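# Minimal usage sketch (assuming the class above): CanineConfig() reproduces the
# google/canine-s defaults, and keyword overrides such as
# CanineConfig(num_hidden_layers=6) adjust individual hyperparameters.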
| 707
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCAmelCase : Tuple = False
lowerCAmelCase : str = True
lowerCAmelCase : List[Any] = False
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCAmelCase : Optional[int] = parser.parse_args()
lowerCAmelCase : int = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
lowerCAmelCase : int = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
lowerCAmelCase : Optional[Any] = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
lowerCAmelCase : int = reader.read()
lowerCAmelCase : List[str] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
lowerCAmelCase : str = UNetaDModel(**config)
else:
lowerCAmelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
lowerCAmelCase : Dict = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase : Union[str, Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase : str = config[key]
del config[key]
lowerCAmelCase : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
lowerCAmelCase : Dict = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
lowerCAmelCase : Tuple = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
lowerCAmelCase : str = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
lowerCAmelCase : str = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
lowerCAmelCase : Dict = param_value
lowerCAmelCase : Tuple = True
if not has_changed:
lowerCAmelCase : Tuple = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 630
| 0
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: values vl, weights wt, capacity w, item count n."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
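# Worked example (classic instance): values [60, 100, 120], weights [10, 20, 30],
# capacity 50: take the first two items whole plus 2/3 of the third.
#     frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) -> 240.0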
if __name__ == "__main__":
import doctest
doctest.testmod()
| 470
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 470
| 1
|
import functools
def mincost_tickets(days, costs):
    """Minimum cost to cover every day in days using 1-, 7- and 30-day passes priced by costs."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
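# Worked example (assuming the validation above passes): for travel days
# [1, 4, 6, 7, 8, 20] and costs [2, 7, 15] the optimum is 11: a 1-day pass on
# day 1, a 7-day pass covering days 4-8, and a 1-day pass on day 20.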
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_clip_fast'''] = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_clip'''] = ['''CLIPFeatureExtractor''']
    _import_structure['''image_processing_clip'''] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clip'''] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_clip'''] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_clip'''] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 530
| 0
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 440
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True,
        padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None,
        return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False,
        return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False,
        verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        # verify input: OCR and user-provided boxes/word labels are mutually exclusive
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        # first, apply the image processor (may run OCR)
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding,
            truncation=truncation, max_length=max_length, stride=stride,
            pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
            return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Return one image per overflowing sample, in mapping order."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 537
| 0
|
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def A (__lowerCamelCase :list[int] , __lowerCamelCase :list[int] , __lowerCamelCase :int ):
_lowerCAmelCase = [0] * no_of_processes
_lowerCAmelCase = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__lowerCamelCase ):
_lowerCAmelCase = burst_time[i]
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 999999999
_lowerCAmelCase = 0
_lowerCAmelCase = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__lowerCamelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
_lowerCAmelCase = remaining_time[j]
_lowerCAmelCase = j
_lowerCAmelCase = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
_lowerCAmelCase = remaining_time[short]
if minm == 0:
_lowerCAmelCase = 999999999
if remaining_time[short] == 0:
complete += 1
_lowerCAmelCase = False
# Find finish time of current process
_lowerCAmelCase = increment_time + 1
# Calculate waiting time
_lowerCAmelCase = finish_time - arrival_time[short]
_lowerCAmelCase = finar - burst_time[short]
if waiting_time[short] < 0:
_lowerCAmelCase = 0
# Increment time
increment_time += 1
return waiting_time
def A (__lowerCamelCase :list[int] , __lowerCamelCase :int , __lowerCamelCase :list[int] ):
_lowerCAmelCase = [0] * no_of_processes
for i in range(__lowerCamelCase ):
_lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
def A (__lowerCamelCase :list[int] , __lowerCamelCase :list[int] , __lowerCamelCase :int ):
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for i in range(__lowerCamelCase ):
_lowerCAmelCase = total_waiting_time + waiting_time[i]
_lowerCAmelCase = total_turn_around_time + turn_around_time[i]
print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
_lowercase = int(input())
_lowercase = [0] * no_of_processes
_lowercase = [0] * no_of_processes
_lowercase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
_lowercase , _lowercase = map(int, input().split())
_lowercase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_lowercase = burst_time
_lowercase = no_of_processes
_lowercase = waiting_time
_lowercase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_lowercase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 717
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name: str) -> ASTConfig:
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
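
# For example (hand-derived from the branches above; downloading the label
# file requires network access): "ast-finetuned-audioset-14-14-0.443" yields
# strides of 14 and the 527 AudioSet labels.
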
def rename_key(name: str) -> str:
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
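
# Hand-traced example of the renaming chain above:
#   rename_key("module.v.blocks.0.attn.proj.weight")
#   -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"
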
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
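
# Example invocation (a sketch; script filename and output path are placeholders):
#   python convert_audio_spectrogram_transformer.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted
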
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
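
# Usage sketch (illustrative; the checkpoint id mirrors the pretrained map above):
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer("a long document ...", return_tensors="pt")
# When a `global_attention_mask` is supplied, `_pad` above extends it with -1
# for padded positions, since 0 already means "local attention".
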
"""simple docstring"""
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
A = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
A = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
A = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) )
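
# Hand-checked examples (illustrative): 25 = 0b11001 and 32 = 0b100000, so
#   binary_and(25, 32) -> "0b000000"
#   binary_and(37, 50) -> "0b100000"
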
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
if not nums:
return 0
A = nums[0]
A = 0
for num in nums[1:]:
A , A = (
max_excluding + num,
max(UpperCamelCase__ , UpperCamelCase__ ),
)
return max(UpperCamelCase__ , UpperCamelCase__ )
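
# Hand-checked example (illustrative): for [2, 7, 9, 3, 1] the best
# non-adjacent picks are 2 + 9 + 1, so
#   maximum_non_adjacent_sum([2, 7, 9, 3, 1]) -> 12
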
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def __A ( a_ : Callable[[int | float], int | float] , a_ : int | float , a_ : int | float , a_ : int = 1_00 , )-> float:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = x_start
SCREAMING_SNAKE_CASE : Union[str, Any] = fnc(a_ )
SCREAMING_SNAKE_CASE : Optional[int] = 0.0
for _ in range(a_ ):
# Approximates curve as a sequence of linear lines and sums their length
SCREAMING_SNAKE_CASE : int = (x_end - x_start) / steps + xa
SCREAMING_SNAKE_CASE : Optional[int] = fnc(a_ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
SCREAMING_SNAKE_CASE : str = xa
SCREAMING_SNAKE_CASE : Any = fxa
return length
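
# Sanity check (hand-derived): for a straight line the piecewise-linear
# approximation is exact, e.g. line_length(lambda x: x, 0, 3) == 3 * sqrt(2)
# (about 4.2426) for any number of steps.
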
if __name__ == "__main__":
def __A ( a_ : Optional[Any] )-> List[Any]:
'''simple docstring'''
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
lowerCamelCase__ : str = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def cross_attention_dim(self):
        return self.time_input_dim * 4

    @property
    def num_inference_steps(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double the in channels because the model predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
import math


def jump_search(arr: list, x: int) -> int:
    """Find the index of x in the sorted list arr (block jump, then linear scan), or -1."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
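
# Hand-checked examples (illustrative):
#   jump_search([0, 1, 3, 5, 8, 13, 21], 8) -> 4
#   jump_search([0, 1, 3, 5, 8, 13, 21], 2) -> -1
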
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input("Enter numbers separated by a comma:\n").strip()
SCREAMING_SNAKE_CASE__ = [int(item) for item in user_input.split(",")]
SCREAMING_SNAKE_CASE__ = int(input("Enter the number to be searched:\n"))
SCREAMING_SNAKE_CASE__ = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(F'''Number {x} is at index {res}''')
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
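
# Quick usage sketch (hand-checked):
#   v = Vector([1, 2, 3]); w = Vector([4, 5, 6])
#   print(v + w)  # (5,7,9)
#   print(v * w)  # 32, the dot product
#   print(v * 2)  # (2,4,6)
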
def zero_vector(dimension: int) -> Vector:
    """Return a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return a unit basis vector with a 1 at index pos."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute the vector x * scalar + y."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a random vector of size n with integer components between a and b."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return a square zero-matrix of dimension n x n."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a random matrix of the given size with integer components between a and b."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
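
# Quick usage sketch (hand-checked):
#   m = Matrix([[1, 2], [3, 4]], 2, 2)
#   print(m.determinant())     # -2
#   print(m * Vector([1, 1]))  # (3,7)
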
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : int ,_snake_case : int ,_snake_case : float ,**_snake_case : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = feature_size
lowercase__ : List[str] = sampling_rate
lowercase__ : List[Any] = padding_value
lowercase__ : List[str] = kwargs.pop('''padding_side''' ,'''right''' )
lowercase__ : Dict = kwargs.pop('''return_attention_mask''' ,_snake_case )
super().__init__(**_snake_case )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] ,_snake_case : Union[bool, str, PaddingStrategy] = True ,_snake_case : Optional[int] = None ,_snake_case : bool = False ,_snake_case : Optional[int] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[Union[str, TensorType]] = None ,) -> BatchFeature:
"""simple docstring"""
if isinstance(_snake_case ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
lowercase__ : Optional[Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features['''attention_mask'''] = []
            return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element ,(list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = '''tf'''
            elif is_torch_tensor(first_element ):
                return_tensors = '''pt'''
            elif isinstance(first_element ,(int, float, list, tuple, np.ndarray) ):
                return_tensors = '''np'''
            else:
                raise ValueError(
                    f"""type of {first_element} unknown: {type(first_element )}. """
                    '''Should be one of a python, numpy, pytorch or tensorflow object.''' )
        for key, value in processed_features.items():
            if isinstance(value[0] ,(int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
# Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding ,max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
# truncation
            inputs_slice = self._truncate(
                inputs ,max_length=max_length ,pad_to_multiple_of=pad_to_multiple_of ,truncation=truncation ,)
            truncated_inputs.append(inputs_slice )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] ,max_length=max_length ,padding_strategy=padding_strategy ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,)
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs ,tensor_type=return_tensors )
    def _pad ( self : Dict ,processed_features : Union[Dict[str, np.ndarray], BatchFeature] ,max_length : Optional[int] = None ,padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,pad_to_multiple_of : Optional[int] = None ,return_attention_mask : Optional[bool] = None ,) -> dict:
"""simple docstring"""
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features['''attention_mask'''] = np.ones(len(required_input ) ,dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features['''attention_mask'''] = np.pad(
                        processed_features['''attention_mask'''] ,(0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input ,padding_shape ,'''constant''' ,constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features['''attention_mask'''] = np.pad(
                        processed_features['''attention_mask'''] ,(difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input ,padding_shape ,'''constant''' ,constant_values=self.padding_value )
            else:
                raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
    def _truncate ( self : List[str] ,processed_features : Union[Dict[str, np.ndarray], BatchFeature] ,max_length : Optional[int] = None ,pad_to_multiple_of : Optional[int] = None ,truncation : Optional[bool] = None ,) -> Union[str, Any]:
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input ) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features['''attention_mask'''] = processed_features['''attention_mask'''][:max_length]
return processed_features
    def _get_padding_strategies ( self : Optional[Any] ,padding : str=False ,max_length : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
            elif not isinstance(padding ,PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding ,PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
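# Illustration (ours): the right-padding path of _pad above on a toy 1-D
# sequence, with the matching attention mask.
#
#     import numpy as np
#     required_input = np.array([0.1, 0.2, 0.3])
#     difference = 5 - len(required_input)
#     mask = np.pad(np.ones(len(required_input), dtype=np.int32), (0, difference))
#     padded = np.pad(required_input, (0, difference), "constant", constant_values=0.0)
#     # padded -> [0.1 0.2 0.3 0.  0. ], mask -> [1 1 1 0 0]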
| 560
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __A :
'''simple docstring'''
@staticmethod
        def UpperCAmelCase ( *args : Any ,**kwargs : List[str] ) -> List[str]:
"""simple docstring"""
pass
def hashimage ( image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def UpperCAmelCase ( self : str ,model : Union[str, Any] ,tokenizer : Union[str, Any] ,processor : Union[str, Any] ) -> str:
        """simple docstring"""
        depth_estimator = DepthEstimationPipeline(model=model ,image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def UpperCAmelCase ( self : str ,depth_estimator : Optional[Any] ,examples : Optional[Any] ) -> Any:
        """simple docstring"""
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,outputs )
        import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] ,outputs ,)
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@slow
@require_torch
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' ,model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        outputs['''depth'''] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.662 )
@require_torch
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
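# Typical usage of the pipeline exercised above (hedged sketch, ours):
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     outputs["depth"].save("depth.png")   # PIL image
#     outputs["predicted_depth"].shape     # torch.Tensor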
| 560
| 1
|
import unittest
import numpy as np
def schur_complement ( mat_a , mat_b , mat_c , pseudo_inv = None , ):
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" )
    return mat_c - mat_b.T @ a_inv @ mat_b
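# The identity exercised by the tests below: for the symmetric block matrix
#     M = [[A, B], [B^T, C]],   S = C - B^T A^{-1} B  (the Schur complement),
# it holds that det(M) = det(A) * det(S).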
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
def UpperCAmelCase__( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
def UpperCAmelCase__( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 712
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__snake_case = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__snake_case = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__snake_case = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def UpperCAmelCase__( self , predictions , references , return_pvalue=False ) -> Optional[int]:
        results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
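# The underlying scipy call, for reference (sketch, ours):
#
#     from scipy.stats import spearmanr
#     rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
#     round(rho, 2)  # -0.7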
| 128
| 0
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase_ ( nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup ( self : int ):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self : Optional[int] , hidden_states : str ):
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class UpperCamelCase_ ( nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup ( self : int ):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self : Union[str, Any] , hidden_states : Any ):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class UpperCamelCase_ ( nn.Module ):
    in_channels : int
    out_channels : int = None
    dropout_prob : float = 0.0
    use_nin_shortcut : bool = None
    dtype : jnp.dtype = jnp.float32
    def setup ( self : List[str] ):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.conv1 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
    def __call__( self : str , hidden_states : List[Any] , temb : List[str] , deterministic : int=True ):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
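# Illustration (ours) of the nearest-neighbour 2x upsampling used by the first
# block above, independent of Flax (NHWC layout):
#
#     x = jnp.arange(4.0).reshape(1, 2, 2, 1)
#     up = jax.image.resize(x, shape=(1, 4, 4, 1), method="nearest")
#     up.shape  # (1, 4, 4, 1); each input pixel repeated 2x2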
| 364
|
_lowercase : Optional[Any] ="""
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowercase : Union[str, Any] =[{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowercase : List[Any] ={
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 364
| 1
|
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    def get_file_format ( self : Tuple , seed : Tuple , shape : str ):
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'
    def tearDown ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def get_latents ( self : Dict , seed : Any=0 , shape : int=(4, 4, 64, 64) , fpaa : Dict=False ):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model ( self : str , fpaa : List[str]=False , model_id : Optional[int]="CompVis/stable-diffusion-v1-4" ):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = 'bf16' if fpaa else None
        model , params = FlaxUNetaDConditionModel.from_pretrained(
            model_id , subfolder='unet' , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states ( self : Tuple , seed : Any=0 , shape : Dict=(4, 77, 768) , fpaa : List[str]=False ):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def __a ( self : List[Any] , seed : str , timestep : Tuple , expected_slice : Optional[Any] ):
        model , params = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=True )
        latents = self.get_latents(seed , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fpaa=True )
        sample = model.apply(
            {'params': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
    def __a ( self : int , seed : Optional[Any] , timestep : str , expected_slice : List[Any] ):
        model , params = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=True )
        latents = self.get_latents(seed , shape=(4, 4, 96, 96) , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 77, 1_024) , fpaa=True )
        sample = model.apply(
            {'params': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
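# For reference, the equivalent load with diffusers' public names (hedged
# sketch, ours; the class is exported as FlaxUNet2DConditionModel):
#
#     model, params = FlaxUNet2DConditionModel.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", subfolder="unet",
#         revision="bf16", dtype=jnp.bfloat16,
#     )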
| 91
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
@staticmethod
        def __a ( *args : int , **kwargs : Optional[int] ):
pass
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
lowerCAmelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def __a ( self : int , model : Optional[Any] , tokenizer : int , processor : Union[str, Any] ):
        object_detector = pipeline(
            'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
        examples = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
    def __a ( self : Tuple , object_detector : int , examples : Tuple ):
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
                {
                    'score': ANY(float ),
                    'label': ANY(str ),
                    'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                }
                for i in range(n )
            ] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __a ( self : Optional[int] ):
pass
@require_torch
def __a ( self : Optional[Any] ):
        object_detector = pipeline(
            'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.6_4 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.6_4 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
def __a ( self : List[str] ):
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __a ( self : Optional[int] ):
pass
@require_torch
@slow
def __a ( self : Optional[Any] ):
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=threshold , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
def __a ( self : Optional[int] ):
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection' )
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=top_k , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , )
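# Typical usage of the pipeline exercised above (hedged sketch, ours):
#
#     detector = pipeline("zero-shot-object-detection")
#     for r in detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote", "couch"],
#         threshold=0.2,
#     ):
#         print(r["label"], round(r["score"], 3), r["box"])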
| 91
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
    task : str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema : ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema : ClassVar[Features] = Features({'''labels''': ClassLabel} )
    audio_column : str = '''audio'''
    label_column : str = '''labels'''
    def UpperCamelCase ( self : Any , features : str ):
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
return task_template
@property
def UpperCamelCase ( self : str ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
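# Illustration (ours) of a feature schema this template aligns against:
#
#     from datasets import Audio, ClassLabel, Features
#     features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
#     features["labels"].num_classes  # 2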
| 699
|
from __future__ import annotations
def print_distance ( distance, src ):
    """simple docstring"""
    print(f'''Vertex\tShortest Distance from vertex {src}''' )
    for i, d in enumerate(distance ):
        print(f'''{i}\t\t{d}''' )
def check_negative_cycle ( graph, distance, edge_count ):
    """simple docstring"""
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ('''src''', '''dst''', '''weight'''))
        if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford ( graph, vertex_count, edge_count, src ):
    """simple docstring"""
    distance = [float('''inf''' )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ('''src''', '''dst''', '''weight'''))
            if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count )
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''' )
    return distance
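# Example (ours): three vertices, three edges, no negative cycle.
#
#     example_graph = [
#         {"src": 0, "dst": 1, "weight": 4},
#         {"src": 0, "dst": 2, "weight": 1},
#         {"src": 2, "dst": 1, "weight": 2},
#     ]
#     bellman_ford(example_graph, 3, 3, 0)  # -> [0.0, 3.0, 1.0]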
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("""Enter number of vertices: """).strip())
    E = int(input("""Enter number of edges: """).strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("""Edge """, i + 1)
        src, dest, weight = (
            int(x)
            for x in input("""Enter source, destination, weight: """).strip().split(""" """)
        )
        graph[i] = {"""src""": src, """dst""": dest, """weight""": weight}
    source = int(input("""\nEnter shortest path source:""").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 0
| 0
|
'''simple docstring'''
import argparse
import datetime
def zeller ( date_input : str ):
    """simple docstring"""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError("""Must be 10 characters long""" )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""" )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""" )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 85_00:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""" )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) ,int(m ) ,int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.3_9 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
    # Response
    response = f'Your date {date_input}, is a {days[str(f )]}!'
    return response
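# Example (ours):
#
#     zeller('06-29-2012')
#     # -> 'Your date 06-29-2012, is a Friday!'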
if __name__ == "__main__":
import doctest
doctest.testmod()
A = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
A = parser.parse_args()
zeller(args.date_input)
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_wav2vec2"""] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_wav2vec2"""] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_wav2vec2"""] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
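# Minimal sketch (ours) of the lazy-import idea behind _LazyModule: the
# submodule is only imported when one of its attributes is first accessed.
#
#     import importlib, types
#     class LazySketch(types.ModuleType):
#         def __init__(self, name, attr_to_module):
#             super().__init__(name)
#             self._attr_to_module = attr_to_module
#         def __getattr__(self, attr):
#             module = importlib.import_module(self._attr_to_module[attr])
#             return getattr(module, attr)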
| 163
| 0
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
    def __init__( self :Tuple , parent :Optional[int] , batch_size :List[Any]=2 , num_channels :Any=3 , image_size :Optional[Any]=4 , patch_size :int=2 , text_seq_length :str=7 , is_training :Any=True , use_input_mask :str=True , use_token_type_ids :Any=True , use_labels :Union[str, Any]=True , vocab_size :int=99 , hidden_size :Union[str, Any]=36 , num_hidden_layers :List[Any]=2 , num_attention_heads :Dict=4 , intermediate_size :Union[str, Any]=37 , hidden_act :List[Any]="gelu" , hidden_dropout_prob :Optional[int]=0.1 , attention_probs_dropout_prob :Any=0.1 , max_position_embeddings :Optional[int]=512 , type_vocab_size :Union[str, Any]=16 , type_sequence_label_size :Optional[Any]=2 , initializer_range :Optional[int]=0.02 , coordinate_size :Tuple=6 , shape_size :Union[str, Any]=6 , num_labels :List[Any]=3 , num_choices :Dict=4 , scope :Any=None , range_bbox :str=1000 , ) ->List[str]:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs ( self :Tuple ) ->Any:
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE( self :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] ) ->int:
lowercase = TFLayoutLMvaModel(config=SCREAMING_SNAKE_CASE__ )
# text + image
lowercase = model(SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowercase = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
lowercase = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowercase = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowercase = model({"pixel_values": pixel_values} , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE( self :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any ) ->str:
lowercase = self.num_labels
lowercase = TFLayoutLMvaForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
lowercase = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE( self :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] ) ->Optional[int]:
lowercase = self.num_labels
lowercase = TFLayoutLMvaForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
lowercase = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE( self :Optional[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) ->List[str]:
lowercase = 2
lowercase = TFLayoutLMvaForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
lowercase = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , pixel_values=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common ( self :Tuple ) ->str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class UpperCamelCase_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase : Optional[int] = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCamelCase : int = False
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = False
def SCREAMING_SNAKE_CASE( self :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int ) ->Optional[int]:
return True
def SCREAMING_SNAKE_CASE( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict=False ) ->List[str]:
lowercase = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowercase = {
k: tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(SCREAMING_SNAKE_CASE__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowercase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowercase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
lowercase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowercase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowercase = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def SCREAMING_SNAKE_CASE( self :int ) ->Optional[int]:
lowercase = TFLayoutLMvaModelTester(self )
lowercase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->Optional[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->int:
lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(SCREAMING_SNAKE_CASE__ )
if getattr(SCREAMING_SNAKE_CASE__ , "hf_compute_loss" , SCREAMING_SNAKE_CASE__ ):
# The number of elements in the loss should be the same as the number of elements in the label
lowercase = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
lowercase = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=SCREAMING_SNAKE_CASE__ )[0]
]
lowercase = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowercase = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
lowercase = prepared_for_class.pop("input_ids" )
lowercase = model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowercase = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
lowercase = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
lowercase = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowercase = -100
lowercase = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
lowercase = model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowercase = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
lowercase = model(SCREAMING_SNAKE_CASE__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowercase = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
# Get keys that were added with the _prepare_for_class function
lowercase = prepared_for_class.keys() - inputs_dict.keys()
lowercase = inspect.signature(model.call ).parameters
lowercase = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowercase = {0: '''input_ids'''}
for label_key in label_keys:
lowercase = signature_names.index(SCREAMING_SNAKE_CASE__ )
lowercase = label_key
lowercase = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowercase = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowercase = prepared_for_class[value]
lowercase = tuple(SCREAMING_SNAKE_CASE__ )
# Send to model
lowercase = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def SCREAMING_SNAKE_CASE( self :str ) ->Dict:
(
lowercase
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE( self :str ) ->Tuple:
(
lowercase
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->List[Any]:
(
lowercase
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->int:
(
lowercase
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE( self :Dict ) ->List[str]:
(
lowercase
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE( self :str ) ->Any:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = TFLayoutLMvaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __snake_case ( ):
'''simple docstring'''
lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head( self ):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="tf" ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 441
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
        - 'matthews_correlation': Matthews Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )


def acc_and_fa( preds , labels , fa_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc( ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        fa = f1_score(y_true=question_labels , y_pred=question_preds , average="macro" )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    fa_m = float(sum(fas ) / len(fas ) )
    em = sum(ems ) / len(ems )
    fa_a = float(f1_score(y_true=labels , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types( self ):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute( self , predictions , references ):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_fa(predictions , references , fa_avg="macro" )
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 233
| 0
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 546
|
import math
def is_prime( number: int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
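# Quick illustrative checks of the 6k +/- 1 primality test above (the literals are
# examples chosen here, not values from the original module):
assert is_prime(2) and is_prime(3) and is_prime(97)
assert not is_prime(1) and not is_prime(9) and not is_prime(100)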
def solution( ratio: float = 0.1 ) -> int:
    """simple docstring"""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 546
| 1
|
import numpy as np
_snake_case : str = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class a :
    """simple docstring"""

    def __init__( self : Optional[int] ) -> None:
        self.SQUARE = np.array(_snake_case )

    def letter_to_numbers( self : Union[str, Any] , letter : str ) -> np.ndarray:
        """Return the one-based (row, column) indexes of a letter in the square."""
        indexa , indexb = np.where(letter == self.SQUARE )
        indexes = np.concatenate([indexa + 1, indexb + 1] )
        return indexes

    def numbers_to_letter( self : Union[str, Any] , indexa : int , indexb : int ) -> str:
        """Return the letter at the given one-based (row, column) indexes."""
        letter = self.SQUARE[indexa - 1, indexb - 1]
        return letter

    def encode( self : Union[str, Any] , message : str ) -> str:
        message = message.lower()
        message = message.replace(" " , "" )
        message = message.replace("j" , "i" )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ""
        for numbers_index in range(len(message ) ):
            indexa = int(second_step[numbers_index * 2] )
            indexb = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(indexa , indexb )
            encoded_message = encoded_message + letter
        return encoded_message

    def decode( self : Any , message : str ) -> str:
        message = message.lower()
        message = message.replace(" " , "" )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ""
        for numbers_index in range(len(message ) ):
            indexa = int(second_step[0, numbers_index] )
            indexb = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(indexa , indexb )
            decoded_message = decoded_message + letter
        return decoded_message
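# Illustrative round trip through the cipher above (message invented here; note that
# encode folds "j" into "i" and strips spaces before encrypting):
if __name__ == "__main__":
    cipher = a()
    secret = cipher.encode("testmessage" )
    print(secret )
    print(cipher.decode(secret ) )  # -> "testmessage"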
| 81
|
def perfect( number: int ) -> bool:
    """simple docstring"""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
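# Worked examples (chosen here for illustration): 6 = 1 + 2 + 3 and
# 28 = 1 + 2 + 4 + 7 + 14 are perfect; 12 is not (1 + 2 + 3 + 4 + 6 = 16).
assert perfect(6) and perfect(28) and not perfect(12)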
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
a = int(input("""Enter number: """).strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
| 687
| 0
|
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour :
    """simple docstring"""

    def __init__( self , img , dst_width , dst_height ):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("""Destination width/height should be > 0""" )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 2_55
        )

    def process( self ):
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]

    def get_x( self , x ):
        return int(self.ratio_x * x )

    def get_y( self , y ):
        return int(self.ratio_y * y )


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
imshow(
f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
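# Self-contained sketch of the same mapping on a tiny synthetic array (sizes invented
# here; no image file is required, unlike the lena.jpg demo above):
if __name__ == "__main__":
    tiny = np.arange(12, dtype=np.uint8).reshape((2, 2, 3))
    scaler = NearestNeighbour(tiny, 4, 4)
    scaler.process()
    print(scaler.output.shape)  # (4, 4, 3): each source pixel fills a 2x2 block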
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
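# A minimal sketch of the lazy-import pattern used above, written against only the
# standard library; names here are illustrative, not the transformers implementation:
#
#     import importlib, types
#
#     class LazySketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map every public symbol to the submodule that defines it
#             self._origin = {
#                 symbol: submodule
#                 for submodule, symbols in import_structure.items()
#                 for symbol in symbols
#             }
#         def __getattr__(self, symbol):
#             module = importlib.import_module("." + self._origin[symbol], self.__name__)
#             return getattr(module, symbol)
#
# Attribute access triggers the real import, so importing the package stays cheap
# until a symbol such as Data2VecTextModel is actually touched.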
| 543
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spm_char.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
        """microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
        """microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """microsoft/speecht5_asr""": 1_0_2_4,
    """microsoft/speecht5_tts""": 1_0_2_4,
    """microsoft/speecht5_vc""": 1_0_2_4,
}

class UpperCAmelCase__ ( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self ,vocab_file ,bos_token="<s>" ,eos_token="</s>" ,unk_token="<unk>" ,pad_token="<pad>" ,sp_model_kwargs = None ,**kwargs ,):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,pad_token=pad_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self ,d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self ,text ):
        '''simple docstring'''
        return self.sp_model.encode(text ,out_type=str )

    def _convert_token_to_id( self ,token ):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self ,index ):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string( self ,tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask( self ,token_ids_0 ,token_ids_1 = None ,already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + suffix_ones
        return ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones

    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
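# Hedged usage sketch (the class name is the one defined above; the checkpoint id
# comes from PRETRAINED_VOCAB_FILES_MAP, everything else is illustrative):
#
#     tokenizer = UpperCAmelCase__.from_pretrained("microsoft/speecht5_asr")
#     ids = tokenizer("hello world")["input_ids"]
#     # build_inputs_with_special_tokens appends a single EOS id and adds no BOS/CLS
#     assert ids[-1] == tokenizer.eos_token_id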
| 229
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class a ( PretrainedConfig ):
    model_type = 'dpt'

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                backbone_config = {
                    """global_padding""": """same""",
                    """layer_type""": """bottleneck""",
                    """depths""": [3, 4, 9],
                    """out_features""": ["""stage1""", """stage2""", """stage3"""],
                    """embedding_dynamic_padding""": True,
                }
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
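# Hedged example of the two configuration paths above (the class name `a` is the one
# defined in this file; upstream it is conventionally called DPTConfig):
#
#     plain = a()                 # plain ViT backbone, readout_type "project"
#     hybrid = a(is_hybrid=True)  # builds the default BiT backbone_config
#     assert hybrid.to_dict()["backbone_config"]["model_type"] == "bit"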
| 277
| 0
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ):
    """simple docstring"""
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
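# Hedged example invocation (the file name and all paths below are placeholders
# chosen for illustration, not artifacts of this repository):
#
#     python convert_bigbird_tf_checkpoint.py \
#         --tf_checkpoint_path /tmp/bigbird/model.ckpt \
#         --big_bird_config_file /tmp/bigbird/config.json \
#         --pytorch_dump_path /tmp/bigbird-pytorch \
#         --is_trivia_qa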
| 307
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'

def _readaa( bytestream ):
    """Read a big-endian uint32 from a byte stream."""
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('''>''' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_images( f ):
    """Extract MNIST images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
        num_images = _readaa(bytestream )
        rows = _readaa(bytestream )
        cols = _readaa(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , '''Please use tf.one_hot on tensors.''' )
def _dense_to_one_hot( labels_dense , num_classes ):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_labels( f , one_hot=False , num_classes=10 ):
    """Extract MNIST labels into a 1D uint8 numpy array (or one-hot matrix)."""
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
        num_items = _readaa(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet:
    @deprecated(
        None , '''Please use alternatives such as official/mnist/_DataSet.py'''
        ''' from tensorflow/models.''' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        seeda , seedb = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images( self ):
        return self._images

    @property
    def labels( self ):
        return self._labels

    @property
    def num_examples( self ):
        return self._num_examples

    @property
    def epochs_completed( self ):
        return self._epochs_completed

    def next_batch( self , batch_size , fake_data=False , shuffle=True ):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples )
            numpy.random.shuffle(perma )
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , '''Please write your own downloading logic.''' )
def _maybe_download( filename , work_directory , source_url ):
    """Download `filename` from `source_url` into `work_directory`, unless present."""
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('''Successfully downloaded''' , filename , size , '''bytes.''' )
    return filepath

@deprecated(
    None , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def read_data_sets( train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    """Load the MNIST train/validation/test splits as a `_Datasets` namedtuple."""
    if fake_data:

        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = '''train-images-idx3-ubyte.gz'''
    train_labels_file = '''train-labels-idx1-ubyte.gz'''
    test_images_file = '''t10k-images-idx3-ubyte.gz'''
    test_labels_file = '''t10k-labels-idx1-ubyte.gz'''
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            '''Validation size should be between 0 and '''
            F'{len(train_images )}. Received: {validation_size}.'
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
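# Hedged smoke test of the batching logic above; `fake_data=True` short-circuits all
# downloads, so this runs offline (the directory argument is never used):
if __name__ == "__main__":
    datasets = read_data_sets("/tmp/mnist_demo" , fake_data=True , one_hot=True )
    images, labels = datasets.train.next_batch(2 , fake_data=True )
    print(len(images ) , len(labels ))  # 2 2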
| 307
| 1
|
import re
def indian_phone_validator( phone: str ) -> bool:
    '''simple docstring'''
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 106
|
"""simple docstring"""
def lowerCamelCase_ ( number ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        raise TypeError("only integers accepted as input" )
    num_str = str(abs(number ) )
    num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
    for index in range(len(num_str ) ):
        num_transpositions[index].pop(index )
    return max(
        int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
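# Worked example (value invented here): for 152 the candidates after deleting one
# digit are 52, 12 and 15, so the maximum is 52.
assert lowerCamelCase_(152) == 52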
| 530
| 0
|
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = """sshleifer/bart-tiny-random"""
TINY_T5 = """patrickvonplaten/t5-tiny-random"""

@require_torch
class __a ( unittest.TestCase ):
    @cached_property
    def teacher_config( self ):
        return AutoConfig.from_pretrained(TINY_BART )

    def test_valid_t5( self ):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def test_asymmetric_t5( self ):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )

    def test_same_decoder_small_encoder( self ):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def test_small_enc_small_dec( self ):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def test_raises_assert( self ):
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 710
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
        """facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
    """facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}

def load_vocab_file( vocab_file ):
    with open(vocab_file , 'r' ) as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class __a ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self , index ):
        return self._id_to_token.get(index , self.unk_token )

    def _convert_token_to_id( self , token ):
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def _tokenize( self , text , **kwargs ):
        return text.split()

    def get_vocab_size( self , with_added_tokens=False ):
        return len(self._id_to_token )

    def get_vocab( self ):
        return {token: i for i, token in enumerate(self.all_tokens )}

    def token_to_id( self , token ):
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def id_to_token( self , index ):
        return self._id_to_token.get(index , self.unk_token )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask

    def save_vocabulary( self , save_directory , filename_prefix ):
        vocab_file = os.path.join(save_directory , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
        with open(vocab_file , 'w' ) as f:
            f.write('\n'.join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def vocab_size( self ):
        return self.get_vocab_size(with_added_tokens=False )

    def _add_tokens( self , new_tokens , special_tokens=False ):
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
| 222
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer

TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module ):
        '''simple docstring'''

        def __init__( self , tokenizer ):
            """simple docstring"""
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPT2LMHeadModel.from_config(config )

        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
        def serving( self , text ):
            """simple docstring"""
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.int32 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['logits']
            return outputs
@require_tf
@require_keras_nlp
class A( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )

    def test_output_equivalence( self ):
        """simple docstring"""
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )

    @slow
    def test_graph_mode( self ):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def test_saved_model( self ):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model , save_path , signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def test_from_config( self ):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def test_padding( self ):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out['input_ids'].numpy().shape[1]
                assert out_length == max_length
| 70
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 190
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ['''CLIPFeatureExtractor''']
    _import_structure["image_processing_clip"] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 569
|
def alternative_string_arrange( first_str , second_str ) -> str:
    """simple docstring"""
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 569
| 1
|
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config( model_name ):
    config = VideoMAEConfig()
    set_architecture_configs(model_name , config )
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 4_0_0
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 1_7_4
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs( model_name , config ):
    if "small" in model_name:
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 1_6
        config.decoder_num_hidden_layers = 1_2
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 1_9_2
        config.decoder_intermediate_size = 7_6_8
    elif "large" in model_name:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
        config.decoder_num_hidden_layers = 1_2
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 5_1_2
        config.decoder_intermediate_size = 2_0_4_8
    elif "huge" in model_name:
        config.hidden_size = 1_2_8_0
        config.intermediate_size = 5_1_2_0
        config.num_hidden_layers = 3_2
        config.num_attention_heads = 1_6
        config.decoder_num_hidden_layers = 1_2
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 6_4_0
        config.decoder_intermediate_size = 2_5_6_0
    elif "base" not in model_name:
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def rename_key( name ):
    if "encoder." in name:
        name = name.replace("encoder." , "" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "videomae.embeddings.cls_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks" , "videomae.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name and "bias" not in name:
        name = name.replace("attn" , "attention.self" )
    if "attn" in name:
        name = name.replace("attn" , "attention.attention" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight" , "videomae.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias" , "videomae.layernorm.bias" )
    if "head" in name and "decoder" not in name:
        name = name.replace("head" , "classifier" )
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
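# Usage sketch (added; assumes this file is saved under its upstream transformers
# name, convert_videomae_to_pytorch.py, and relies on the default Google Drive
# checkpoint URL above; the output folder is illustrative):
#
#   python convert_videomae_to_pytorch.py \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base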
| 525
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 525
| 1
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
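# Sanity-check sketch (added, not in the original script): on a tiny fixed array
# the sort must produce ascending order; the comparison count varies run to run
# because the pivot is chosen at random.
_demo = np.array([3.0, 1.0, 2.0])
_demo_count = _in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert list(_demo) == [1.0, 2.0, 3.0]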
| 346
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 346
| 1
|
import math
import sys
import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
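# Note (added): each neighbour is weighted by the product of two Gaussians — one
# over spatial distance (get_gauss_kernel) and one over intensity difference
# (vec_gaussian of the slice minus its centre pixel) — so edges survive while
# flat regions are smoothed. A quick headless check, using synthetic data:
#
#   noisy = (np.ones((32, 32)) * 0.5 + np.random.normal(0, 0.05, (32, 32))).astype("float32")
#   smoothed = bilateral_filter(noisy, 1.0, 1.0, 5)
#   assert smoothed.shape == noisy.shape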
| 485
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F'{solution() = }')
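# Derivation note (added): an a-by-b grid contains T(a) * T(b) rectangles, where
# T(n) = n(n+1)/2 is the n-th triangle number. Fixing T(a) and solving
# T(b) = target / T(a), i.e. b^2 + b - 2*target/T(a) = 0, gives the estimate
# b = (-1 + sqrt(1 + 8*target/T(a))) / 2 used above; flooring and ceiling it
# covers both nearest integer candidates.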
| 485
| 1
|
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

format_json_to_md(input_json_file, output_md_file)
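# Example sketch (added, hypothetical data): given an input JSON like
#   {"benchmarks/bench_map.json": {"time": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
# the script emits a collapsible Markdown section containing:
#   ### Benchmark: bench_map.json
#   | metric | time |
#   |--------|---|
#   | new / old (diff) | 1.500000 / 2.000000 (-0.500000) |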
| 709
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    """Calculate waiting times under shortest-remaining-time-first scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    """Turnaround time is burst time plus waiting time for each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    """Print the averages of the waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
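# Worked example (added, computed by hand): with arrival times [0, 1, 2] and burst
# times [4, 2, 3], SRTF preempts P1 at t=1 to run the shorter P2, finishes P2 at
# t=3, resumes P1 until t=6, then runs P3 until t=9; waiting times come out as
# [2, 0, 4] and turnaround times as [6, 2, 7].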
| 563
| 0
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
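# Usage sketch (added; the environment-variable names are illustrative): return the
# first non-negative integer found across several candidate variables.
#
#   os.environ["MASTER_PORT"] = "29500"
#   assert get_int_from_env(["PET_MASTER_PORT", "MASTER_PORT"], 29400) == 29500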
| 308
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"transformer.blocks.{i}.norm1.weight", F"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm1.bias", F"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.weight", F"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.bias", F"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.norm2.weight", F"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm2.bias", F"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.mlp.fc1.weight", F"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc1.bias", F"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.weight", F"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.bias", F"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
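# Usage sketch (added; assumes this file keeps its upstream transformers name,
# convert_vilt_original_to_pytorch.py, and uses the default MLM+ITM checkpoint URL):
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm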
| 308
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the original model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 700
|
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
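# Worked check (added, verified by hand): 600851475143 = 71 * 839 * 1471 * 6857,
# so the loop's last recorded prime factor — and the expected output — is 6857.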
| 139
| 0
|
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
| 294
|
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 269
| 0
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial given as coefficients from lowest to highest degree."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one pass, no powers)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
__UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
__UpperCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
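# Worked check (added, computed by hand): for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and
# x = 10.0, both evaluations give 5*10^2 + 9.3*10^3 + 7*10^4 = 500 + 9300 + 70000
# = 79800.0, since Horner's rule just refactors c0 + x*(c1 + x*(c2 + ...)).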
| 720
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images built from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 327
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=18, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, ) -> Optional[int]:
UpperCamelCase : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
UpperCamelCase : Dict = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : str = num_channels
UpperCamelCase : int = image_size
UpperCamelCase : Tuple = min_resolution
UpperCamelCase : Optional[Any] = max_resolution
UpperCamelCase : Union[str, Any] = do_resize
UpperCamelCase : Union[str, Any] = size
UpperCamelCase : Union[str, Any] = apply_ocr
def snake_case_ ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Tuple = LayoutLMvaImageProcessingTester(self )
@property
def snake_case_ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'apply_ocr' ) )
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'height': 18, 'width': 18} )
UpperCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {'height': 42, 'width': 42} )
def snake_case_ ( self ) -> int:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image = Image.open(ds[0]['file']).convert('RGB')

        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
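For reference, a minimal usage sketch of the processor exercised above. It assumes pytesseract/Tesseract is installed and that `page.png` is a local document image (an illustrative path); the import mirrors the one at the top of this test.

from PIL import Image
from transformers import LayoutLMvaImageProcessor

image_processing = LayoutLMvaImageProcessor()  # apply_ocr=True by default
image = Image.open("page.png").convert("RGB")  # hypothetical local file
encoding = image_processing(image, return_tensors="pt")
print(encoding.pixel_values.shape)     # torch.Size([1, 3, 224, 224])
print(encoding.words, encoding.boxes)  # OCR words and boxes, one list per image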
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # masked mean pooling: average only over the non-padded token positions
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
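A standalone sketch of the masked mean pooling used in forward() above, with toy tensors (shapes illustrative):

import torch

embs = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
# zero out padded positions, then average over the real tokens only
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 8])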
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = free cell, 0 = obstacle) with unit step cost.

    Returns the distance to `destination` and the path as a list of (row, col) cells.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
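A small worked example for dijkstra above (1 = walkable, 0 = obstacle; values illustrative):

import numpy as np

grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
print(dist)  # 4.0
print(path)  # [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]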
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
    ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i, i + 1) for i in range(10)]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
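Concretely, the distribution rule tested above hands earlier jobs the larger share when shards don't divide evenly, as the parametrized cases encode:

from datasets.utils.sharding import _distribute_shards

print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]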
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build the list of (old_key, new_key) pairs mapping the original ViLT weights to the HF layout."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query, key and value weights."""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT checkpoint's weights into our ViLT structure."""
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
# Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
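Example invocation, assuming the script above is saved as convert_vilt.py (the checkpoint URL is the parser's default; the output folder is illustrative):

# python convert_vilt.py \
#     --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#     --pytorch_dump_folder_path ./vilt-mlm-itm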
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pre-train."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for pre-training."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Dynamically pads received inputs and prepares masked time indices for pre-training."""

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer subclass that decays the gumbel softmax temperature after every update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
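For intuition, a tiny standalone sketch of the gumbel temperature schedule applied in training_step above, using the default hyperparameters (step values chosen for illustration):

max_temp, min_temp, decay = 2.0, 0.5, 0.999995
for step in (0, 100_000, 500_000):
    print(step, max(max_temp * decay**step, min_temp))
# 0 2.0
# 100000 ~1.21
# 500000 0.5  (clamped at the minimum)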
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
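A minimal, self-contained sketch of the lazy-import pattern used above (simplified; transformers' real _LazyModule handles more cases, and the module names here are illustrative). Attribute access triggers the real import only on first use:

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # resolve the attribute to its submodule and import it on first access
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(submodule)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")


lazy_math = LazyModule("lazy_math", {"math": ["sqrt"], "statistics": ["mean"]})
print(lazy_math.sqrt(16))  # 4.0 -- "math" is only imported here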
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
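A quick sanity check of selection_sort (illustrative values):

assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([]) == []
assert selection_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]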