code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def A__ ( self , lowerCAmelCase ) -> int:
'''simple docstring'''
_lowercase =3
_lowercase =250
_lowercase =ids_tensor((batch_size, length) , lowerCAmelCase )
_lowercase =torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
_lowercase , _lowercase =self._get_tensors(5 )
_lowercase =StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
_lowercase , _lowercase =self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
_lowercase , _lowercase =self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase =MaxLengthCriteria(max_length=10 )
_lowercase , _lowercase =self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
_lowercase , _lowercase =self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
_lowercase , _lowercase =self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_lowercase , _lowercase =self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
_lowercase , _lowercase =self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
_lowercase , _lowercase =self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
_lowercase =StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase , _lowercase =self._get_tensors(5 )
_lowercase =MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
_lowercase =MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
_lowercase =validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
| 291 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = 42
_a = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 291 | 1 |
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
_snake_case = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
_snake_case = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
_snake_case = BeautifulSoup(res.text, "html.parser")
_snake_case = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"""https://google.com{link.get("href")}""")
| 721 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(snake_case_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__SCREAMING_SNAKE_CASE = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__SCREAMING_SNAKE_CASE = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if "://" in dataset_path:
SCREAMING_SNAKE_CASE_ : str =dataset_path.split('://' )[1]
return dataset_path
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : List[str] ,lowerCAmelCase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =not is_remote_filesystem(lowerCAmelCase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCAmelCase_ ) ,fs._strip_protocol(lowerCAmelCase_ ) )
else:
fs.mv(lowerCAmelCase_ ,lowerCAmelCase_ ,recursive=lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
"""simple docstring"""
if hasattr(fsspec.asyn ,'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
SCREAMING_SNAKE_CASE_ : List[Any] =None
SCREAMING_SNAKE_CASE_ : Union[str, Any] =None
SCREAMING_SNAKE_CASE_ : Any =threading.Lock()
| 220 |
from math import factorial, radians
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 18 , SCREAMING_SNAKE_CASE = 10 ):
'''simple docstring'''
A_ = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
A_ = radians(SCREAMING_SNAKE_CASE )
A_ = angle_in_radians
A_ = 3
A_ = -1
for _ in range(SCREAMING_SNAKE_CASE ):
result += (b * (angle_in_radians**a)) / factorial(SCREAMING_SNAKE_CASE )
A_ = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 203 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1_0_0_0 ) ->Dict:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
UpperCAmelCase = n - 1
UpperCAmelCase = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
UpperCAmelCase = 0
while count < prec:
UpperCAmelCase = random.randint(2 , n - 1 )
UpperCAmelCase = bin_exp_mod(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if b != 1:
UpperCAmelCase = True
for _ in range(lowerCAmelCase_ ):
if b == n - 1:
UpperCAmelCase = False
break
UpperCAmelCase = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
__a = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 627 |
import math
import qiskit
def _UpperCamelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 ) ->qiskit.result.counts.Counts:
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
UpperCAmelCase = qiskit.QuantumRegister(4 , """qr""" )
UpperCAmelCase = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
UpperCAmelCase = [input_a, input_a, carry_in]
UpperCAmelCase = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits
UpperCAmelCase = qiskit.Aer.get_backend("""aer_simulator""" )
UpperCAmelCase = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_0_0_0 )
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 627 | 1 |
'''simple docstring'''
from collections.abc import Callable
def A_ ( _lowerCamelCase : Callable[[float], float] , _lowerCamelCase : float , _lowerCamelCase : float ):
_lowerCAmelCase = a
_lowerCAmelCase = b
if function(_lowerCamelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_lowerCamelCase ) == 0:
return b
elif (
function(_lowerCamelCase ) * function(_lowerCamelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
_lowerCAmelCase = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_lowerCamelCase ) == 0:
return mid
elif function(_lowerCamelCase ) * function(_lowerCamelCase ) < 0:
_lowerCAmelCase = mid
else:
_lowerCAmelCase = mid
_lowerCAmelCase = start + (end - start) / 2.0
return mid
def A_ ( _lowerCamelCase : float ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 309 | '''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def a ( self : int , __lowerCAmelCase : int ):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_lowerCAmelCase = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__lowerCAmelCase )
def a ( self : str ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCAmelCase , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCAmelCase , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Dict ):
"""simple docstring"""
_lowerCAmelCase = 'sgugger/tiny-distilbert-classification'
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCAmelCase , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCAmelCase , only_pretrain_model=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Tuple ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCAmelCase , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCAmelCase , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCAmelCase , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase , [config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : int ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCAmelCase , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase , [config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Any ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCAmelCase , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Tuple ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCAmelCase , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase , [config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Any ):
"""simple docstring"""
_lowerCAmelCase = 'patrickvonplaten/t5-tiny-random'
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCAmelCase , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def a ( self : Any ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCAmelCase , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__lowerCAmelCase , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Dict ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCAmelCase , save_to_csv=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCAmelCase , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(__lowerCAmelCase , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(__lowerCAmelCase , 'env.csv' ) , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCAmelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCAmelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCAmelCase , 'env.csv' ) ).exists() )
def a ( self : Dict ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__lowerCAmelCase : Dict ):
self.assertTrue(hasattr(__lowerCAmelCase , 'sequential' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'cumulative' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'current' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCAmelCase , 'log.txt' ) , log_print=__lowerCAmelCase , trace_memory_line_by_line=__lowerCAmelCase , eager_mode=__lowerCAmelCase , multi_process=__lowerCAmelCase , )
_lowerCAmelCase = TensorFlowBenchmark(__lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__lowerCAmelCase , 'log.txt' ) ).exists() )
| 309 | 1 |
from __future__ import annotations
from collections.abc import Callable
SCREAMING_SNAKE_CASE__ = list[list[float | int]]
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[Any]:
UpperCamelCase = len(snake_case_ )
UpperCamelCase = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
for row in range(snake_case_ ):
for col in range(snake_case_ ):
UpperCamelCase = matrix[row][col]
UpperCamelCase = vector[row][0]
UpperCamelCase = 0
UpperCamelCase = 0
while row < size and col < size:
# pivoting
UpperCamelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_ , snake_case_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
UpperCamelCase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , snake_case_ ):
UpperCamelCase = augmented[rowa][col] / augmented[row][col]
UpperCamelCase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , snake_case_ ):
for row in range(snake_case_ ):
UpperCamelCase = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(snake_case_ )
]
def lowercase__ ( __UpperCamelCase )-> Any:
UpperCamelCase = len(snake_case_ )
UpperCamelCase = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
UpperCamelCase = [[0] for _ in range(snake_case_ )]
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
UpperCamelCase = (x_val + 1) ** (size - col - 1)
UpperCamelCase = y_val
UpperCamelCase = solve(snake_case_ , snake_case_ )
def interpolated_func(__UpperCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
def lowercase__ ( __UpperCamelCase )-> Any:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowercase__ ( __UpperCamelCase = question_function , __UpperCamelCase = 10 )-> Union[str, Any]:
UpperCamelCase = [func(snake_case_ ) for x_val in range(1 , order + 1 )]
UpperCamelCase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
UpperCamelCase = 0
UpperCamelCase = 42
UpperCamelCase = 42
for poly in polynomials:
UpperCamelCase = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
if __name__ == "__main__":
print(f'{solution() = }')
| 703 |
'''simple docstring'''
from __future__ import annotations
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> tuple[float, list[float]]:
UpperCamelCase = list(range(len(__UpperCamelCase ) ) )
UpperCamelCase = [v / w for v, w in zip(__UpperCamelCase , __UpperCamelCase )]
index.sort(key=lambda __UpperCamelCase : ratio[i] , reverse=__UpperCamelCase )
UpperCamelCase = 0
UpperCamelCase = [0] * len(__UpperCamelCase )
for i in index:
if weight[i] <= capacity:
UpperCamelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCamelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return number | (1 << position)
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return number & ~(1 << position)
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return number ^ (1 << position)
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> bool:
return ((number >> position) & 1) == 1
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def snake_case ( ) -> int:
lowerCamelCase : List[str] = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
lowerCamelCase : List[Any] = Dataset.from_dict(UpperCamelCase__ )
return dataset
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: List[str] )-> str:
lowerCamelCase : Union[str, Any] = get_dataset()
lowerCamelCase : Union[str, Any] = make_duplicate_clusters(__a , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : List[str] = get_dataset()
lowerCamelCase , lowerCamelCase : Optional[Any] = deduplicate_dataset(__a )
self.assertEqual(len(__a ) , 2 )
print(__a )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , __a )
| 222 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase ( unittest.TestCase ):
def snake_case_ ( self : str ) -> int:
debug_launcher(test_script.main )
def snake_case_ ( self : Optional[int] ) -> List[Any]:
debug_launcher(test_ops.main )
| 716 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__UpperCAmelCase : Any = 0B1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__UpperCAmelCase : Optional[Any] = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class lowerCamelCase :
def __init__( self : Tuple ) -> List[Any]:
_a : List[Any] = WATERMARK_BITS
_a : List[str] = WatermarkEncoder()
self.encoder.set_watermark('''bits''' , self.watermark )
def snake_case_ ( self : Dict , __snake_case : torch.FloatTensor ) -> Optional[Any]:
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
_a : Union[str, Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_a : Any = [self.encoder.encode(__snake_case , '''dwtDct''' ) for image in images]
_a : Optional[int] = torch.from_numpy(np.array(__snake_case ) ).permute(0 , 3 , 1 , 2 )
_a : Any = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
| 249 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=0 ):
# Format the message.
if name is None:
__magic_name__ : List[str] =None
else:
__magic_name__ : Optional[Any] =""".""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
__magic_name__ : Optional[int] =fmt.format(lowerCamelCase )
# Print and recurse (if needed).
if isinstance(lowerCamelCase , lowerCamelCase ):
if msg is not None:
print(lowerCamelCase )
for k in val.keys():
recursive_print(lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase , torch.Tensor ):
print(lowerCamelCase , """:""" , val.size() )
else:
print(lowerCamelCase , """:""" , lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
__magic_name__ : Union[str, Any] =param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
__magic_name__ : int =(num_heads, hidden_size, num_splits) + input_shape[1:]
__magic_name__ : List[str] =param.view(*lowerCamelCase )
__magic_name__ : List[Any] =param.transpose(0 , 2 )
__magic_name__ : int =param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
__magic_name__ : Any =(num_heads, num_splits, hidden_size) + input_shape[1:]
__magic_name__ : int =param.view(*lowerCamelCase )
__magic_name__ : str =param.transpose(0 , 1 ).contiguous()
__magic_name__ : List[Any] =param.view(*lowerCamelCase )
return param
def lowerCAmelCase_ ( args , input_state_dict , config ):
    """Convert a Megatron-LM GPT2 ``model`` state dict to the HuggingFace GPT2 layout.

    Args:
        args: parsed CLI namespace (currently unused here, kept for interface parity).
        input_state_dict: the raw Megatron checkpoint dictionary.
        config: a GPT2 config object; its dimensions are overwritten from the
            checkpoint's training args when those are present.

    Returns:
        dict mapping HuggingFace GPT2 parameter names to tensors.
    """
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args" , None )
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version (0.0 when absent, i.e. very old checkpoints).
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings, truncated to vocab_size rows (Megatron pads the vocab).
    word_embeddings = embeddings["word_embeddings"]["weight"]
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings. [max_sequence_length, hidden_size]
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    n_positions = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer (newer checkpoints call it "encoder").
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        m = layer_re.match(key )
        # Stop if that's not a layer (e.g. the final layernorm comes after all layers).
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1 ) )
        # The name of the operation.
        op_name = m.group(2 )
        # Is it a weight or a bias?
        weight_or_bias = m.group(3 )
        # The name of the layer on the HuggingFace side.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm" ):
            ln_name = "ln_1" if op_name.startswith("input" ) else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias (the causal mask GPT2 expects as a buffer).
            causal_mask = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
                1 , 1 , n_positions , n_positions )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4 , dtype=torch.float16 )
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0 , 1 ).contiguous()
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0 , 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG: the loop must have seen exactly config.n_layer layers.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers wants the matrix to weight embeddings (weight tying).
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def lowerCAmelCase_ ( ):
    """CLI entry point: load a Megatron-LM GPT2 checkpoint, convert it to the
    HuggingFace format, and save config, tokenizer files and weights next to it.

    Side effects: reads the checkpoint path from the command line, writes
    ``config.json``, tokenizer files and ``pytorch_model.bin`` to the
    checkpoint's directory.
    """
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure" , action="store_true" )
    parser.add_argument(
        "path_to_checkpoint" , type=str , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
    parser.add_argument(
        "--config_file" , default="" , type=str , help="An optional config json file describing the pre-trained model." , )
    args = parser.parse_args()
    # Extract the basename: outputs are written alongside the checkpoint.
    basename = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
    if args.path_to_checkpoint.endswith(".zip" ):
        with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict , map_location="cpu" )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint , map_location="cpu" )
    ds_args = input_state_dict.get("args" , None )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=activation_function , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , )
    else:
        config = GPTaConfig.from_json_file(args.config_file )
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting" )
    output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None , output_state_dict )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}" )
    else:
        tokenizer_model = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config" )
    config.save_pretrained(basename )
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files" )
    tokenizer.save_pretrained(basename )
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename , "pytorch_model.bin" )
    print(f"Saving checkpoint to \"{output_checkpoint_file}\"" )
    torch.save(output_state_dict , output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this chunk — the entry-point
    # function above is named `lowerCAmelCase_`. Confirm the intended binding.
    main()
####################################################################################################
| 21 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
# Module-level logger (conventionally named `logger`; the obfuscated name is
# never referenced below — TODO confirm against the rest of the file).
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class __A ( FeatureExtractionMixin ):  # base restored: FeatureExtractionMixin is imported above and otherwise unused
    """Generic feature extractor for sequential (e.g. speech) inputs.

    Provides padding/truncation of batches of variable-length feature
    sequences to a common length, mirroring tokenizer padding semantics.
    """

    def __init__( self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ):
        """Store the feature dimensionality, sampling rate and pad value.

        ``padding_side`` ("right"/"left") and ``return_attention_mask`` may be
        overridden through ``kwargs``; the rest is forwarded to the mixin.
        """
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )

    def pad(
        self ,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ] ,
        padding: Union[bool, str, PaddingStrategy] = True ,
        max_length: Optional[int] = None ,
        truncation: bool = False ,
        pad_to_multiple_of: Optional[int] = None ,
        return_attention_mask: Optional[bool] = None ,
        return_tensors: Optional[Union[str, TensorType]] = None ,
    ):
        """Pad (and optionally truncate) a batch of features to a common length.

        Accepts either a dict of lists or a list of dicts; returns a
        ``BatchFeature`` in the requested tensor framework.
        """
        # If a list of dicts was passed, transpose it into a dict of lists.
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys() )}" )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            # Infer the target framework from the first element's type.
            if is_tf_tensor(first_element ):
                return_tensors = "tf"
            elif is_torch_tensor(first_element ):
                return_tensors = "pt"
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element )}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object." )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError("Some items in the output dictionary have a different batch size than others." )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation happens per-example before padding
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # downcast double-precision features to float32 for model consumption
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )

    def _pad(
        self ,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature] ,
        max_length: Optional[int] = None ,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,
        pad_to_multiple_of: Optional[int] = None ,
        return_attention_mask: Optional[bool] = None ,
    ):
        """Pad a single example (dict of arrays) up to ``max_length``."""
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (0, difference) )
                # 2-D padding spec when features are vectors, 1-D otherwise
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return processed_features

    def _truncate(
        self ,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature] ,
        max_length: Optional[int] = None ,
        pad_to_multiple_of: Optional[int] = None ,
        truncation: Optional[bool] = None ,
    ):
        """Truncate a single example down to ``max_length`` when requested."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input ) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
        return processed_features

    def _get_padding_strategies( self , padding=False , max_length=None ):
        """Normalize the user-facing ``padding`` argument to a ``PaddingStrategy``."""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
        return padding_strategy
| 21 | 1 |
class lowercase_ :
    """Fenwick-style tree supporting point updates and range-max queries.

    ``arr`` holds the raw values; ``tree[i]`` caches the maximum over the
    block ``arr[get_prev(i) + 1 .. i]``.
    """

    def __init__( self , size ):
        self.size = size
        # raw values
        self.arr = [0] * size
        # per-block running maxima
        self.tree = [0] * size

    @staticmethod
    def get_next( index ):
        # Next index whose block covers `index`.
        return index | (index + 1)

    @staticmethod
    def get_prev( index ):
        # Last index before the block that ends at `index`.
        return (index & (index + 1)) - 1

    def update( self , index , value ):
        """Set ``arr[index] = value`` and refresh the cached maxima above it.

        NOTE: maxima are only refreshed upward (``max`` with the cached
        value), so updates that *decrease* a value can leave stale maxima —
        a known limitation of this structure.
        """
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                # Block of length 1: the cached max is exactly the new value.
                self.tree[index] = value
            else:
                # Fold the new value into the cached block maximum.
                # (The original compared against loop indices, not stored values.)
                self.tree[index] = max(self.tree[index] , value )
            index = self.get_next(index )

    def query( self , left , right ):
        """Return ``max(arr[left:right])`` (``right`` exclusive) in O(log n)."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                # The whole cached block fits into the query range.
                result = max(result , self.tree[right] )
                right = current_left
            else:
                # Block sticks out on the left: take the single element.
                result = max(result , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowercase_ ( unittest.TestCase ):
    """Tests for the BLIP-2 processor (image processor + GPT2 tokenizer)."""

    def setUp( self ):
        # Save a small processor to a temp dir so it can be reloaded by name.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
        processor = BlipaProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        """Create a list with a single random PIL image (channels-last)."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features( self ):
        # Saving then reloading with extra kwargs must apply those kwargs.
        processor = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )

    def test_image_processor( self ):
        # The processor must delegate image preprocessing to the image processor.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def test_tokenizer( self ):
        # The processor must delegate text tokenization to the tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
# Physical constants used by the Casimir-force formula below.
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def lowerCamelCase_ ( force , area , distance ):
    """Solve the idealized Casimir equation F = (ℏ c π² A) / (240 d⁴) for the
    one quantity given as 0.

    Exactly one of ``force``, ``area``, ``distance`` must be 0; the other two
    must be non-negative. Returns a single-entry dict naming the solved value.

    Raises:
        ValueError: if zero or several arguments are 0, or any is negative.
    """
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 463 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
# Module-level logger (conventionally named `logger`; the obfuscated name `__A`
# is not referenced elsewhere in this chunk — TODO confirm).
__A =logging.getLogger(__name__)
def lowerCamelCase_ ( ):
    """Parse the CLI arguments for the TFRecord-shard preparation script.

    Returns:
        argparse.Namespace with dataset, tokenizer, sharding and output options.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=str , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=int , default=1000 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
    return args
def lowerCamelCase_ ( tokenizer ):
    """Build a callable for `datasets.map` that tokenizes the `text` column.

    Args:
        tokenizer: callable applied to a batch's list of texts.

    Returns:
        A function mapping ``{"text": [...]}`` batches through ``tokenizer``.
    """
    def fn(examples ):
        return tokenizer(examples["text"] )
    return fn
def lowerCamelCase_ ( tokenized_data ):
    """Serialize tokenized samples into TFRecord `tf.train.Example` byte strings.

    Args:
        tokenized_data: dict with parallel lists under ``input_ids`` and
            ``attention_mask``.

    Returns:
        list of serialized ``tf.train.Example`` byte strings, one per sample.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        serialized = example.SerializeToString()
        records.append(serialized )
    return records
def lowerCamelCase_ ( args ):
    """Tokenize a dataset, pack it into fixed-length chunks and write TFRecord shards.

    Side effects: creates output directories, writes ``.tfrecord`` shard files
    and a ``split-<split>-records-count.txt`` summary file.
    """
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , f"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
        print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt" , "w" ) as f:
        print(f"Total {args.split} records: {total_records}" , file=f )
if __name__ == "__main__":
    # Parse CLI args and run the shard-writing pipeline.
    args = parse_args()
    main(args)
| 463 | 1 |
'''simple docstring'''
import qiskit
def snake_case_ ( SCREAMING_SNAKE_CASE__ = 2 ):
'''simple docstring'''
_snake_case = qubits
# Using Aer's simulator
_snake_case = qiskit.Aer.get_backend("aer_simulator" )
# Creating a Quantum Circuit acting on the q register
_snake_case = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , SCREAMING_SNAKE_CASE__ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(SCREAMING_SNAKE_CASE__ ) ) , list(range(SCREAMING_SNAKE_CASE__ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
_snake_case = qiskit.execute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shots=10_00 )
return job.result().get_counts(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    # NOTE(review): `quantum_entanglement` is not defined in this chunk — the
    # function above is named `snake_case_`; this call raises NameError as
    # written. Confirm the intended function name.
    print(F'Total count for various states are: {quantum_entanglement(3)}')
| 368 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration file. (The original bound both
# this and the archive map below to the same placeholder name, so the logger
# was immediately overwritten.)
logger = logging.get_logger(__name__)

# Map from canonical checkpoint name to its hosted config.json.
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration for a BiT (Big Transfer) ResNet-style backbone.

    Stores the model hyper-parameters and inherits the backbone feature /
    index selection behaviour (`out_features`, `out_indices`) from
    ``BackboneConfigMixin``. The original had duplicate (placeholder)
    parameter names and never stored anything on ``self``; restored here.
    """

    model_type = "bit"
    # Residual-layer variants supported by the implementation.
    layer_types = ["preactivation", "bottleneck"]
    # Convolution padding strategies accepted for `global_padding`.
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=None,
        depths=None,
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Avoid mutable default arguments: fall back to the standard BiT-50 layout.
        if hidden_sizes is None:
            hidden_sizes = [256, 512, 1_024, 2_048]
        if depths is None:
            depths = [3, 4, 6, 3]
        if layer_type not in self.layer_types:
            raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F'''Padding strategy {global_padding} not supported''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # Stage names drive backbone feature selection: "stem" + one entry per depth.
        self.stage_names = ["stem"] + [F'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 368 | 1 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
# Module-level logger; the pipeline below reports out-of-vocabulary targets
# through it. (The original annotated this as `List[str]` with `List` never
# imported — a NameError at import time — and bound it to `A` while the
# pipeline reads `logger`.)
logger = logging.get_logger(__name__)
@add_end_docstrings(
    snake_case_ , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
    """Masked-language-modeling ("fill-mask") pipeline.

    Replaces the tokenizer's mask token in the input with the model's top
    predictions, for both the PyTorch and TensorFlow backends, optionally
    restricted to a list of target words. (Method and local names were
    mangled to placeholders in the original; restored from their call sites.)
    """

    def get_masked_index(self, input_ids) -> np.ndarray:
        """Return the positions of the mask token inside `input_ids`."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids) -> np.ndarray:
        """Raise a PipelineException when `input_ids` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs) -> str:
        """Validate that every sequence in `model_inputs` contains a mask token."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        """Tokenize `inputs` into framework tensors and validate mask presence."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model; keep the input ids around for postprocessing."""
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn raw logits into the top-k `{score, token, token_str, sequence}` rows."""
        # When targets are restricted, we cannot return more candidates than targets.
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Map target words to vocabulary ids, tokenizing out-of-vocab targets (with a warning)."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Split call kwargs into preprocess / forward / postprocess parameter dicts."""
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Fill the masked token in the text(s) given as inputs; unwrap single-item lists."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
| 160 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place queens row by row, appending every complete board to `boards`.

    `possible_board[r]` is the column of the queen already placed in row `r`;
    the two collision lists hold the occupied 45°/135° diagonals. (The original
    gave every parameter the same placeholder name, a SyntaxError; restored
    from the in-function reads and the caller below.)
    """
    # The current row equals the number of queens already placed.
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # First we check that in the current board (possible_board) there is no other
        # queen in the same column (vertical collision). Then we apply the two diagonal
        # formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of these two formulas exist in their variables
        # respectively (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle and print every solution board plus a count.

    Named `n_queens_solution` to match the call in the `__main__` guard below
    (the original reused the same placeholder name as `depth_first_search`,
    shadowing it).
    """
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 135 | 0 |
'''simple docstring'''
# Public API of this utils package (re-exported by the imports below). The
# original bound this list to a placeholder name instead of `__all__`, so it
# had no effect on `from ... import *`.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
# Re-export the names declared in `__all__` above. (The last line carried
# stray table-cell residue "| 712 |" which made it a SyntaxError.)
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline.

    Method and local names were mangled to placeholders in the original (so
    `tearDown` never ran and the test methods were undiscoverable); restored
    from their read-sites. The stray "| 178 | 0 |" residue that corrupted the
    final assertion was removed.
    """

    def tearDown(self):
        # Drop Python-side references between tests to keep device memory in check.
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-canny""", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = """bird"""
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=5_0,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
        print(F"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-openpose""", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = """Chef in the kitchen"""
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=5_0,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
        print(F"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """Builds small Timesformer configs and dummy video inputs for the tests below.

    Renamed from a placeholder: the test class instantiates
    `TimesformerModelTester(self)`, which was otherwise undefined. The original
    `__init__` also gave every parameter the same placeholder name (a
    SyntaxError) and never stored the values on `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random video data."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a tiny TimesformerConfig from the tester's hyper-parameters."""
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common unit tests for the Timesformer model family.

    Restored from placeholders: the original named every method `a`, duplicated
    the class name with the tester above, and used undefined base-class
    placeholders. Base classes come from the mixins imported at the top of the
    file; test methods must start with `test_` to be discovered by unittest.
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37 )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Deep-copy the inputs and, when requested, add dummy labels for classification heads."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """Download the cached test video (a saved numpy array) and return its frames as a list.

    Renamed from a placeholder: the integration test below calls `prepare_video()`.
    """
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end inference check against a pretrained Kinetics-400 checkpoint."""

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt" ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ))
| 61 |
import operator as op
# NOTE(review): every constant below is bound to the same placeholder name
# `UpperCamelCase`, so each assignment silently overwrites the previous one and
# only the final list survives at import time. The values resemble the
# checkpoint / SageMaker / FSDP / launcher constants of
# `accelerate.utils.constants`, but the original distinct identifiers appear to
# have been lost — TODO restore them before relying on this module.
# Checkpoint file-name fragments (scaler, model, RNG, optimizer, scheduler).
UpperCamelCase = 'scaler.pt'
UpperCamelCase = 'pytorch_model'
UpperCamelCase = 'random_states'
UpperCamelCase = 'optimizer'
UpperCamelCase = 'scheduler'
# Weight file names and their sharded-index counterparts (torch + safetensors).
UpperCamelCase = 'pytorch_model.bin'
UpperCamelCase = 'pytorch_model.bin.index.json'
UpperCamelCase = 'model.safetensors'
UpperCamelCase = 'model.safetensors.index.json'
# SageMaker-related version strings and instance types.
UpperCamelCase = '1.10.2'
UpperCamelCase = 'py38'
UpperCamelCase = '4.17.0'
UpperCamelCase = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
# FSDP option enumerations.
UpperCamelCase = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
UpperCamelCase = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
UpperCamelCase = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
UpperCamelCase = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
UpperCamelCase = '2.0.1'
# DeepSpeed multi-node launchers and torch.compile (dynamo) modes.
UpperCamelCase = ['pdsh', 'standard', 'openmpi', 'mvapich']
UpperCamelCase = ['default', 'reduce-overhead', 'max-autotune']
# Map comparison-operator strings to their `operator` module functions.
UpperCamelCase = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase = [
    'nnodes',
    'nproc_per_node',
    'rdzv_backend',
    'rdzv_endpoint',
    'rdzv_id',
    'rdzv_conf',
    'standalone',
    'max_restarts',
    'monitor_interval',
    'start_method',
    'role',
    'module',
    'm',
    'no_python',
    'run_path',
    'log_dir',
    'r',
    'redirects',
    't',
    'tee',
    'node_rank',
    'master_addr',
    'master_port',
]
# Distributed-type groupings (CUDA-capable vs. XPU-capable backends).
UpperCamelCase = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
UpperCamelCase = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 61 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE__ ( property ):
    """A `property` whose computed value is cached on the instance.

    After the first access the result is stored on the object under
    ``__cached_<getter name>`` and returned directly on subsequent accesses,
    so the getter runs at most once per instance. (The original subclassed an
    undefined placeholder and bound its locals to one mangled name; `property`
    and the local names are restored from the read-sites.)
    """

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: return the descriptor, like `property`.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''' )
        attr = '''__cached_''' + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            # Memoize on the instance so later lookups skip the getter.
            setattr(obj, attr, cached)
        return cached
def A_ ( snake_case ):
    """Convert a truth-value string to 1 or 0 (like ``distutils.util.strtobool``).

    Accepts y/yes/t/true/on/1 (-> 1) and n/no/f/false/off/0 (-> 0), case
    insensitively; anything else raises ``ValueError``.

    Fix: the lowered value was previously assigned to a throwaway name while
    the membership tests read an undefined ``val`` (NameError at runtime).
    """
    val = snake_case.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}" )
def A_ ( snake_case : Dict ) -> List[Any]:
    """Return True if the argument is a tensor of any supported framework.

    Checks, in order: torch.fx proxy, torch.Tensor, tf.Tensor, jax arrays /
    tracers, and finally numpy.ndarray. Each framework is imported lazily and
    only probed when its availability check passes, so this is safe to call
    when torch/TF/JAX are not installed.
    """
    if is_torch_fx_proxy(snake_case ):
        return True
    if is_torch_available():
        import torch

        if isinstance(snake_case , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(snake_case , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(snake_case , (jnp.ndarray, Tracer) ):
            return True
    # Fall through: numpy is always available in this module.
    return isinstance(snake_case , np.ndarray )
def A_ ( snake_case ):
    """Return True if `snake_case` is a ``numpy.ndarray``."""
    is_ndarray = isinstance(snake_case, np.ndarray)
    return is_ndarray
def A_ ( snake_case ):
    """Public check for numpy arrays; delegates to the private helper."""
    result = _is_numpy(snake_case)
    return result
def A_ ( snake_case ):
    """Return True if `snake_case` is a ``torch.Tensor`` (torch must be importable)."""
    import torch

    tensor_cls = torch.Tensor
    return isinstance(snake_case, tensor_cls)
def A_ ( snake_case ):
    """Safely test for torch.Tensor: returns False when torch is not installed."""
    if not is_torch_available():
        return False
    return _is_torch(snake_case)
def A_ ( snake_case ):
    """Return True if `snake_case` is a ``torch.device`` (torch must be importable)."""
    import torch

    device_cls = torch.device
    return isinstance(snake_case, device_cls)
def A_ ( snake_case ):
    """Safely test for torch.device: returns False when torch is not installed."""
    if not is_torch_available():
        return False
    return _is_torch_device(snake_case)
def A_ ( snake_case ):
    """Return True if `snake_case` is a ``torch.dtype`` or a string naming one.

    A string such as ``"float32"`` is resolved via ``getattr(torch, name)``;
    unknown names return False. Assumes torch is importable.

    Fix: the previous revision called ``isinstance(x, x)`` / ``hasattr(x, x)``
    with the same object for both arguments, which raises TypeError for any
    non-class input instead of performing the intended str/dtype checks.
    """
    import torch

    if isinstance(snake_case, str):
        if hasattr(torch, snake_case):
            snake_case = getattr(torch, snake_case)
        else:
            return False
    return isinstance(snake_case, torch.dtype)
def A_ ( snake_case ):
    """Safely test for torch.dtype: returns False when torch is not installed."""
    if not is_torch_available():
        return False
    return _is_torch_dtype(snake_case)
def A_ ( snake_case ):
    """Return True if `snake_case` is a ``tf.Tensor`` (tensorflow must be importable)."""
    import tensorflow as tf

    tensor_cls = tf.Tensor
    return isinstance(snake_case, tensor_cls)
def A_ ( snake_case ):
    """Safely test for tf.Tensor: returns False when tensorflow is not installed."""
    if not is_tf_available():
        return False
    return _is_tensorflow(snake_case)
def A_ ( snake_case ):
    """Return True if `snake_case` is a symbolic (graph-mode) TF tensor.

    Fix: the availability probe for ``is_symbolic_tensor`` (added in TF 2.14)
    previously ran ``hasattr`` on the *input object* instead of on the
    ``tensorflow`` module, so the modern code path was effectively dead.
    """
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(snake_case)
    return type(snake_case) == tf.Tensor
def A_ ( snake_case ):
    """Safely test for symbolic TF tensors: False when tensorflow is absent."""
    if not is_tf_available():
        return False
    return _is_tf_symbolic_tensor(snake_case)
def A_ ( snake_case ):
    """Return True if `snake_case` is a jax array (jax must be importable)."""
    import jax.numpy as jnp  # noqa: F811

    array_cls = jnp.ndarray
    return isinstance(snake_case, array_cls)
def A_ ( snake_case ):
    """Safely test for jax arrays: returns False when jax/flax is not installed."""
    if not is_flax_available():
        return False
    return _is_jax(snake_case)
def A_ ( snake_case : int ) -> str:
'''simple docstring'''
if isinstance(snake_case , (dict, UserDict) ):
return {k: to_py_obj(snake_case ) for k, v in obj.items()}
elif isinstance(snake_case , (list, tuple) ):
return [to_py_obj(snake_case ) for o in obj]
elif is_tf_tensor(snake_case ):
return obj.numpy().tolist()
elif is_torch_tensor(snake_case ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(snake_case ):
return np.asarray(snake_case ).tolist()
elif isinstance(snake_case , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def A_ ( snake_case ):
    """Recursively convert tensors — and dicts/lists/tuples of them — to
    ``numpy.ndarray`` objects.

    Fix: the dict branch previously recursed through an undefined module-level
    name with the *whole* input (infinite recursion) and iterated ``obj``
    which was never bound; recursion now goes through a local helper.
    """

    def _to_np(obj):
        if isinstance(obj, (dict, UserDict)):
            return {k: _to_np(v) for k, v in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return np.array(obj)
        elif is_tf_tensor(obj):
            return obj.numpy()
        elif is_torch_tensor(obj):
            return obj.detach().cpu().numpy()
        elif is_jax_tensor(obj):
            return np.asarray(obj)
        else:
            return obj

    return _to_np(snake_case)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
    """Base class for model outputs: acts both as an ordered mapping and as a
    dataclass with attribute access.

    NOTE(review): the base class name comes from elsewhere in this file
    (upstream this is ``ModelOutput(OrderedDict)``), and several helper names
    (``is_tensor``, ``self.to_tuple``) are expected to exist at module/class
    level — confirm against upstream.

    Fixes over the previous revision: several methods declared duplicate
    parameter names such as ``(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_)``
    (a SyntaxError), and the post-init logic assigned every intermediate to a
    single throwaway name while reading the upstream variable names
    (``class_fields``, ``first_field`` …), which could never run.
    """

    def A__ ( self ):
        """Post-init hook: populate the mapping view from the dataclass fields.

        Accepts either a single (key, value) iterator / dict as the first
        field, or one value per declared field; fields whose value is None
        are omitted from the mapping.
        """
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a
            # (key, value) iterator, set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__ ( self, *args, **kwargs ):
        """Deletion is forbidden: the mapping view must stay in sync with attributes."""
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )

    def A__ ( self, *args, **kwargs ):
        """``setdefault`` is forbidden for the same reason as ``__delitem__``."""
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )

    def A__ ( self, *args, **kwargs ):
        """``pop`` is forbidden for the same reason as ``__delitem__``."""
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." )

    def A__ ( self, *args, **kwargs ):
        """``update`` is forbidden for the same reason as ``__delitem__``."""
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." )

    def __getitem__ ( self, k ):
        """String keys index the mapping; integer/slice keys index the value tuple."""
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            # NOTE(review): `to_tuple` is the upstream method name; in this file
            # the tuple conversion is defined as the last `A__` method below.
            return self.to_tuple()[k]

    def __setattr__ ( self, name, value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__ ( self, key, value ):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def A__ ( self ):
        """Convert self to a tuple containing all the non-None values, in key order."""
        return tuple(self[k] for k in self.keys())
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    """Enum with a more explicit error message for missing values.

    NOTE(review): both base class names come from elsewhere in the file
    (upstream: ``str, Enum``); listing the same base twice would raise a
    TypeError at class creation — confirm against upstream.
    """

    @classmethod
    def A__ ( cls , SCREAMING_SNAKE_CASE_ ):
        """Called by Enum when a lookup fails; list the valid values.

        Fixes: ``_valueamember_map_`` was a typo for Enum's internal
        ``_value2member_map_``, and the message read an undefined ``value``
        instead of the parameter.
        """
        raise ValueError(
            f"{SCREAMING_SNAKE_CASE_} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}" )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
    """Padding strategies for tokenizers: 'longest', 'max_length', 'do_not_pad'.

    NOTE(review): all three members rebind the same name ``_snake_case`` so
    only 'do_not_pad' survives — upstream these are LONGEST / MAX_LENGTH /
    DO_NOT_PAD; confirm before relying on this enum.
    """
    _snake_case = 'longest'
    _snake_case = 'max_length'
    _snake_case = 'do_not_pad'
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
    """Tensor framework identifiers: 'pt', 'tf', 'np', 'jax'.

    NOTE(review): all four members rebind the same name ``_snake_case`` so
    only 'jax' survives — upstream these are PYTORCH / TENSORFLOW / NUMPY /
    JAX; confirm before relying on this enum.
    """
    _snake_case = 'pt'
    _snake_case = 'tf'
    _snake_case = 'np'
    _snake_case = 'jax'
class SCREAMING_SNAKE_CASE__ :
    """Enter/exit a list of context managers as a single context manager,
    backed by ``contextlib.ExitStack``.

    Fixes over the previous revision: ``__init__`` bound both attributes to a
    throwaway local instead of ``self`` (so ``__enter__`` raised
    AttributeError), ``__enter__`` entered an undefined module-level name
    instead of each manager, and ``__exit__`` declared ``*args``/``**kwargs``
    with the same parameter name (a SyntaxError).
    """

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def A_ ( snake_case ):
    """Check whether a model class can return a loss, i.e. whether its
    forward/call signature has a ``return_loss`` parameter defaulting to True.

    Fix: the framework and signature were previously assigned to a throwaway
    name while the branches read the undefined names ``framework`` and
    ``signature``.
    """
    framework = infer_framework(snake_case)
    if framework == "tf":
        signature = inspect.signature(snake_case.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(snake_case.forward)  # PyTorch models
    else:
        signature = inspect.signature(snake_case.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def A_ ( snake_case ):
    """Return the label argument names accepted by a model class, by inspecting
    its forward/call signature (QA models also accept start/end positions).

    Fix: the model name, framework and signature were previously assigned to a
    throwaway name while the branches read the undefined upstream names.
    """
    model_name = snake_case.__name__
    framework = infer_framework(snake_case)
    if framework == "tf":
        signature = inspect.signature(snake_case.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(snake_case.forward)  # PyTorch models
    else:
        signature = inspect.signature(snake_case.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def A_ ( d , parent_key = "" , delimiter = "." ):
    """Flatten a nested mapping into a single-level dict, joining nested keys
    with `delimiter` (e.g. ``{"a": {"b": 1}}`` -> ``{"a.b": 1}``).

    Fix: every parameter (outer and inner) was previously named ``snake_case``
    — duplicate parameter names are a SyntaxError — and the recursion went
    through an undefined module-level name. The inner generator now recurses
    on itself.
    """

    def _flatten(obj, parent_key, delimiter):
        for k, v in obj.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                # Non-empty sub-mapping: recurse; empty ones are kept as values.
                yield from _flatten(v, key, delimiter)
            else:
                yield key, v

    return dict(_flatten(d, parent_key, delimiter))
@contextmanager
def A_ ( working_dir , use_temp_dir = False ):
    """Context manager yielding a fresh temporary directory when
    `use_temp_dir` is set, otherwise the provided `working_dir`.

    Fix: both parameters were previously named ``snake_case`` (a SyntaxError)
    while the body read the undefined upstream names.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def A_ ( array , axes = None ):
    """Framework-agnostic transpose for numpy / torch / TF / jax arrays.

    Fix: both parameters were previously named ``snake_case`` (a SyntaxError)
    and the body mixed that name with the upstream names ``array``/``axes``.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}." )
def A_ ( array , newshape ):
    """Framework-agnostic reshape for numpy / torch / TF / jax arrays.

    Fix: both parameters were previously named ``snake_case`` (a SyntaxError)
    and the torch branch read the undefined name ``array``.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}." )
def A_ ( array , axis = None ):
    """Framework-agnostic squeeze for numpy / torch / TF / jax arrays.

    Fix: both parameters were previously named ``snake_case`` (a SyntaxError)
    and the torch branch read the undefined names ``array``/``axis``.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}." )
def A_ ( array , axis ):
    """Framework-agnostic expand_dims for numpy / torch / TF / jax arrays.

    Fix: both parameters were previously named ``snake_case`` (a SyntaxError)
    and the torch branch read the undefined names ``array``/``axis``.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}." )
def A_ ( array ):
    """Framework-agnostic element count for numpy / torch / TF / jax arrays.

    Fixes: the body mixed the parameter name with the undefined upstream name
    ``array``, and the error message was copy/pasted from ``expand_dims``.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}." )
def A_ ( auto_map , repo_id ):
    """Prefix local class references in `auto_map` with ``repo_id--`` in place
    and return the map; entries already containing ``--`` are left untouched.

    Fix: both parameters were previously named ``snake_case`` (a SyntaxError)
    and the rewritten values were assigned to a throwaway local instead of
    back into ``auto_map[key]``, so the function was a no-op.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def A_ ( snake_case ):
    """Infer the framework ('tf', 'pt' or 'flax') of a model class by walking
    its MRO and matching base-class modules/names.

    Fixes: the module/name of each base were assigned to a throwaway local
    while the branches read the undefined names ``module``/``name``, and the
    TypeError was raised inside the loop on the first non-matching base
    instead of only after the whole MRO was exhausted.
    """
    for base_class in inspect.getmro(snake_case):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        if module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        if module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f"Could not infer framework from class {snake_case}." )
| 702 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def A_ ( snake_case ):
    """Monte Carlo estimate of pi from `snake_case` random points in [-1, 1]^2;
    prints the estimate, the reference value and the absolute error.

    Fixes: the inner predicate declared both parameters with the same name
    (a SyntaxError), and intermediates were assigned to a throwaway name while
    later lines read ``distance_from_centre`` / ``proportion`` /
    ``pi_estimate``.
    """

    def is_in_circle(x, y):
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(snake_case)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def A_ ( iterations , function_to_integrate , min_value = 0.0 , max_value = 1.0 ):
    """Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value], using `iterations` uniform samples.

    Fix: all four parameters were previously named ``snake_case`` (duplicate
    parameter names are a SyntaxError) while the body read the upstream names.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def A_ ( iterations , min_value = 0.0 , max_value = 1.0 ):
    """Sanity-check the area-under-curve estimator on y = x and print the
    estimated vs. exact area over [min_value, max_value].

    Fixes: duplicate ``snake_case`` parameter names (a SyntaxError), the
    identity function's parameter did not match the name it returned, and the
    results were assigned to a throwaway name while the prints read
    ``estimated_value`` / ``expected_value``.
    NOTE(review): ``area_under_curve_estimator`` is the upstream helper name;
    in this file the helper was renamed — confirm the reference resolves.
    """

    def identity_function(x):
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value )}")
    print('******************')
def A_ ( snake_case ):
    """Estimate pi as the area under y = sqrt(4 - x^2) over [0, 2] and print
    the estimate, the reference value, and the absolute error.

    Fixes: the integrand's parameter did not match the name it used (NameError)
    and the result was assigned to a throwaway name while the prints read
    ``estimated_value``.
    NOTE(review): ``area_under_curve_estimator`` is the upstream helper name;
    in this file the helper was renamed — confirm the reference resolves.
    """

    def function_to_integrate(x):
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        snake_case, function_to_integrate, 0.0, 2.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi )}")
    print('******************')
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 451 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__magic_name__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
__magic_name__ : Any = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def UpperCamelCase (height , width , scale_factor=8 ):
    """Round `height`/`width` up to multiples of ``scale_factor**2`` in latent
    space, then rescale by `scale_factor`; returns ``(new_height, new_width)``.

    Fix: the three parameters were previously all named
    ``SCREAMING_SNAKE_CASE`` (duplicate parameter names are a SyntaxError) and
    the quotients were assigned to a throwaway name while later lines
    incremented and returned the undefined ``new_height``/``new_width``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
    """Kandinsky 2.2 decoder pipeline: generates images from (prior-produced)
    image embeddings using a UNet, a DDPM-style scheduler, and a MoVQ decoder.

    NOTE(review): this block is preserved verbatim for reconstruction against
    upstream. Several signatures repeat the parameter name ``_A`` (duplicate
    parameter names are a SyntaxError in Python), and many assignments bind the
    throwaway name ``UpperCamelCase`` while later lines read the upstream
    variable names (``latents``, ``hook``, ``image_embeds``, ...), so the
    methods cannot run as written.
    """
    def __init__( self , _A , _A , _A , ):
        """Register unet/scheduler/movq sub-modules and cache the MoVQ scale factor."""
        super().__init__()
        self.register_modules(
            unet=_A , scheduler=_A , movq=_A , )
        # presumably meant to be self.movq_scale_factor (read in __call__) — confirm upstream
        UpperCamelCase : str = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def _a ( self , _A , _A , _A , _A , _A , _A ):
        """Create random initial latents (or validate provided ones) and scale
        them by the scheduler's initial noise sigma."""
        if latents is None:
            UpperCamelCase : int = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            UpperCamelCase : List[str] = latents.to(_A )
        UpperCamelCase : Dict = latents * scheduler.init_noise_sigma
        return latents
    def _a ( self , _A=0 ):
        """Offload all models to CPU via accelerate to reduce memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        UpperCamelCase : List[Any] = torch.device(f"""cuda:{gpu_id}""" )
        UpperCamelCase : Dict = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(_A , _A )
    def _a ( self , _A=0 ):
        """Offload models to CPU with hooks so each is moved to GPU only when
        its forward runs (requires accelerate >= 0.17)."""
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        UpperCamelCase : List[str] = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=_A )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        UpperCamelCase : List[str] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            UpperCamelCase , UpperCamelCase : List[str] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
        # We'll offload the last model manually.
        UpperCamelCase : List[Any] = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _a ( self ):
        """Return the device on which the pipeline's modules actually execute
        (accounting for accelerate hooks)."""
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(_A , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(_A )
    def __call__( self , _A , _A , _A = 5_1_2 , _A = 5_1_2 , _A = 1_0_0 , _A = 4.0 , _A = 1 , _A = None , _A = None , _A = "pil" , _A = True , ):
        """Run the denoising loop from image embeddings to decoded images.

        Roughly: duplicate embeddings for classifier-free guidance, create
        initial latents, iterate scheduler timesteps predicting noise with the
        UNet, then decode the final latents with MoVQ and post-process.
        """
        UpperCamelCase : Optional[int] = self._execution_device
        UpperCamelCase : Optional[Any] = guidance_scale > 1.0
        if isinstance(_A , _A ):
            UpperCamelCase : List[str] = torch.cat(_A , dim=0 )
        UpperCamelCase : List[Any] = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(_A , _A ):
            UpperCamelCase : Optional[int] = torch.cat(_A , dim=0 )
        if do_classifier_free_guidance:
            UpperCamelCase : List[str] = image_embeds.repeat_interleave(_A , dim=0 )
            UpperCamelCase : str = negative_image_embeds.repeat_interleave(_A , dim=0 )
            UpperCamelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
        self.scheduler.set_timesteps(_A , device=_A )
        UpperCamelCase : str = self.scheduler.timesteps
        UpperCamelCase : Optional[Any] = self.unet.config.in_channels
        UpperCamelCase , UpperCamelCase : int = downscale_height_and_width(_A , _A , self.movq_scale_factor )
        # create initial latent
        UpperCamelCase : Any = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _A , _A , _A , self.scheduler , )
        for i, t in enumerate(self.progress_bar(_A ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCamelCase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCamelCase : Union[str, Any] = {"""image_embeds""": image_embeds}
            UpperCamelCase : Optional[Any] = self.unet(
                sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
            if do_classifier_free_guidance:
                UpperCamelCase , UpperCamelCase : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
                UpperCamelCase , UpperCamelCase : Optional[int] = noise_pred.chunk(2 )
                UpperCamelCase , UpperCamelCase : Union[str, Any] = variance_pred.chunk(2 )
                UpperCamelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                UpperCamelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                UpperCamelCase , UpperCamelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase : Optional[Any] = self.scheduler.step(
                _A , _A , _A , generator=_A , )[0]
        # post-processing
        UpperCamelCase : int = self.movq.decode(_A , force_not_quantize=_A )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            UpperCamelCase : Any = image * 0.5 + 0.5
            UpperCamelCase : Optional[Any] = image.clamp(0 , 1 )
            UpperCamelCase : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCamelCase : Any = self.numpy_to_pil(_A )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_A )
| 102 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# Fix: the list below was assigned to a throwaway name while the appends
# targeted the (previously undefined) upstream name `rename_keys`.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append(
        (
            F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
            F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
        )
    )
    rename_keys.append(
        (
            F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
            F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
        )
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
    )
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
    )
    rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
    rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
    rename_keys.append(
        (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
    )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("""input_proj.weight""", """input_projection.weight"""),
        ("""input_proj.bias""", """input_projection.bias"""),
        ("""query_embed.weight""", """query_position_embeddings.weight"""),
        ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
        ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
        ("""class_embed.weight""", """class_labels_classifier.weight"""),
        ("""class_embed.bias""", """class_labels_classifier.bias"""),
        ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
        ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
        ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
        ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
        ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
        ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
        ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
        ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
        ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
        ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
        ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
        ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
        ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
        ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
        ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
        ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
    ]
)
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
lowercase : List[str] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = val
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
lowercase : str = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase : Dict = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
lowercase : Union[str, Any] = value
else:
lowercase : Tuple = value
return new_state_dict
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> List[Any]:
lowercase : str = """"""
if is_panoptic:
lowercase : Tuple = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase : int = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
lowercase : Union[str, Any] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Dict = in_proj_weight[:256, :]
lowercase : Optional[int] = in_proj_bias[:256]
lowercase : Tuple = in_proj_weight[256:512, :]
lowercase : Any = in_proj_bias[256:512]
lowercase : Any = in_proj_weight[-256:, :]
lowercase : Dict = in_proj_bias[-256:]
def _snake_case( ) -> Tuple:
lowercase : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase : Tuple = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
lowercase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowercase : Tuple = """resnet101"""
if "dc5" in model_name:
lowercase : List[Any] = True
lowercase : Optional[Any] = """panoptic""" in model_name
if is_panoptic:
lowercase : Optional[int] = 250
else:
lowercase : Tuple = 91
lowercase : Any = """huggingface/label-files"""
lowercase : int = """coco-detection-id2label.json"""
lowercase : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
lowercase : Any = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowercase : int = idalabel
lowercase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
lowercase : int = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowercase : List[Any] = ConditionalDetrImageProcessor(format=SCREAMING_SNAKE_CASE__ )
# prepare image
lowercase : Dict = prepare_img()
lowercase : List[str] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
lowercase : List[str] = encoding["""pixel_values"""]
logger.info(f"Converting model {model_name}..." )
# load original model from torch hub
lowercase : Union[str, Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ ).eval()
lowercase : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowercase : str = """conditional_detr.""" + src
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = rename_backbone_keys(SCREAMING_SNAKE_CASE__ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , is_panoptic=SCREAMING_SNAKE_CASE__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase : Optional[int] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
lowercase : Union[str, Any] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowercase : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
lowercase : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowercase : Dict = state_dict.pop(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = val
# finally, create HuggingFace model and load state dict
lowercase : str = ConditionalDetrForSegmentation(SCREAMING_SNAKE_CASE__ ) if is_panoptic else ConditionalDetrForObjectDetection(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
model.push_to_hub(repo_id=SCREAMING_SNAKE_CASE__ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
lowercase : List[Any] = conditional_detr(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowercase : Any = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 336 | 0 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def _lowerCamelCase( lowerCAmelCase__ : List[Any] ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def _lowerCamelCase( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return (-y * np.log(lowerCAmelCase__ ) - (1 - y) * np.log(1 - h )).mean()
def _lowerCamelCase( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
return np.sum(y * scores - np.log(1 + np.exp(lowerCAmelCase__ ) ) )
def _lowerCamelCase( lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict=7_0000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros(x.shape[1] )
for iterations in range(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : List[str] = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = sigmoid_function(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE_ : Optional[Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE_ : Optional[int] = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = sigmoid_function(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = cost_function(lowerCAmelCase__ , lowerCAmelCase__ )
if iterations % 100 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
A = datasets.load_iris()
A = iris.data[:, :2]
A = (iris.target != 0) * 1
A = 0.1
A = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('theta: ', theta) # printing the theta i.e our weights vector
def _lowerCamelCase( lowerCAmelCase__ : int ):
'''simple docstring'''
return sigmoid_function(
np.dot(lowerCAmelCase__ , lowerCAmelCase__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((A) , (A)) = (x[:, 0].min(), x[:, 0].max())
((A) , (A)) = (x[:, 1].min(), x[:, 1].max())
((A) , (A)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
A = np.c_[xxa.ravel(), xxa.ravel()]
A = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show() | 97 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __a ( __A ):
'''simple docstring'''
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = SMALL_MODEL_IDENTIFIER
SCREAMING_SNAKE_CASE_ : List[Any] = 'pt'
SCREAMING_SNAKE_CASE_ : Tuple = 'tf'
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : int = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase__ )
model_tf.save_pretrained(UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'mock_framework'
# Framework provided - return whatever the user provides
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = FeaturesManager.determine_framework(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FeaturesManager.determine_framework(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Any = FeaturesManager.determine_framework(UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = MagicMock(return_value=UpperCamelCase__ )
with patch('transformers.onnx.features.is_tf_available' , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : str = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
SCREAMING_SNAKE_CASE_ : List[str] = MagicMock(return_value=UpperCamelCase__ )
with patch('transformers.onnx.features.is_torch_available' , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_tf )
# Both in environment -> use PyTorch
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MagicMock(return_value=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = MagicMock(return_value=UpperCamelCase__ )
with patch('transformers.onnx.features.is_tf_available' , UpperCamelCase__ ), patch(
'transformers.onnx.features.is_torch_available' , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# Both not in environment -> raise error
SCREAMING_SNAKE_CASE_ : Tuple = MagicMock(return_value=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Any = MagicMock(return_value=UpperCamelCase__ )
with patch('transformers.onnx.features.is_tf_available' , UpperCamelCase__ ), patch(
'transformers.onnx.features.is_torch_available' , UpperCamelCase__ ):
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = FeaturesManager.determine_framework(self.test_model ) | 97 | 1 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase ):
a__: Dict = 'pixel_values'
a__: Any = False
a__: List[Any] = TimmBackboneConfig
def __init__( self , UpperCAmelCase , **UpperCAmelCase ):
requires_backends(self , '''timm''' )
super().__init__(UpperCAmelCase )
lowerCamelCase_ = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(f"backbone {config.backbone} is not supported by timm." )
if hasattr(UpperCAmelCase , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
lowerCamelCase_ = getattr(UpperCAmelCase , '''use_pretrained_backbone''' , UpperCAmelCase )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCamelCase_ = config.out_indices if getattr(UpperCAmelCase , '''out_indices''' , UpperCAmelCase ) is not None else (-1,)
lowerCamelCase_ = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCamelCase_ = self._backbone.return_layers
lowerCamelCase_ = {layer['''module''']: str(UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ):
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCamelCase_ = kwargs.pop('''config''' , TimmBackboneConfig() )
lowerCamelCase_ = kwargs.pop('''use_timm_backbone''' , UpperCAmelCase )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
lowerCamelCase_ = kwargs.pop('''num_channels''' , config.num_channels )
lowerCamelCase_ = kwargs.pop('''features_only''' , config.features_only )
lowerCamelCase_ = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
lowerCamelCase_ = kwargs.pop('''out_indices''' , config.out_indices )
lowerCamelCase_ = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
pass
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ):
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCamelCase_ = self._all_layers
lowerCamelCase_ = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self._return_layers
lowerCamelCase_ = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCamelCase_ = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = None
lowerCamelCase_ = tuple(UpperCAmelCase )
lowerCamelCase_ = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
lowerCamelCase_ = (feature_maps,)
if output_hidden_states:
lowerCamelCase_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase )
| 29 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch.load(snake_case_ , map_location='cpu' )
if "model" in sd.keys():
_lowerCAmelCase : List[Any] = torch.load(snake_case_ , map_location='cpu' )["model"]
# pop unnecessary weights
_lowerCAmelCase : List[str] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
_lowerCAmelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCAmelCase : Optional[int] = sd.pop(snake_case_ )
_lowerCAmelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCAmelCase : int = sd[key]
# We split QKV in separate Q,K,V
_lowerCAmelCase : List[Any] = key.replace('.qkv_proj.' , '.q_proj.' )
_lowerCAmelCase : Any = key.replace('.qkv_proj.' , '.k_proj.' )
_lowerCAmelCase : Optional[int] = key.replace('.qkv_proj.' , '.v_proj.' )
_lowerCAmelCase : int = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCAmelCase : Dict = torch.split(snake_case_ , depth // 3 , dim=0 )
_lowerCAmelCase : Union[str, Any] = q
_lowerCAmelCase : Optional[Any] = k
_lowerCAmelCase : Union[str, Any] = v
del sd[key]
return sd
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = load_checkpoint(snake_case_ )
if config is not None:
_lowerCAmelCase : Tuple = OPTConfig.from_pretrained(snake_case_ )
else:
_lowerCAmelCase : Tuple = OPTConfig()
_lowerCAmelCase : Union[str, Any] = OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
_lowerCAmelCase = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 702 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase(nn.Module):
    """FiLM layer: per-feature scale/shift predicted from a conditioning embedding."""

    def __init__(self, in_features, out_features):
        # NOTE(review): parameter names and bias=False reconstructed from body
        # usage; the original had duplicate placeholder parameters.
        super().__init__()
        # Predicts one scale and one shift per output feature (hence * 2).
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        # Split the projection into the scale and shift halves.
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 16 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-oriented records into per-column lists of floats.

    >>> SCREAMING_SNAKE_CASE([[1, 2], [3, 4]])
    [[1.0, 3.0], [2.0, 4.0]]
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow the output until there is a list for column i.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def SCREAMING_SNAKE_CASE(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each column of scores.

    Weight 1 means "higher is better" (plain normalisation); weight 0 means
    "lower is better" (1 - normalised value). A constant column scores 1
    (weight 0) or 0 (weight 1) for every item.

    Raises:
        ValueError: for any weight other than 0 or 1.
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    # Constant column: every value is equally (maximally) good.
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def SCREAMING_SNAKE_CASE(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores element-wise into one final score per row."""
    final_scores: list[float] = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def SCREAMING_SNAKE_CASE(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Append each row's aggregate weighted score to the row (in place) and return it.

    NOTE(review): relies on ``get_data`` / ``calculate_each_score`` /
    ``generate_final_scores`` helpers that are not defined under those names
    in this file -- confirm the sibling definitions.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 213 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for DownBlock2D output values.

    NOTE(review): base class, attribute names and the method name are
    reconstructed -- the original used undefined/colliding placeholder names;
    ``UNetBlockTesterMixin`` is imported above and otherwise unused.
    """

    block_class = DownBlockaD  # noqa F405
    block_type = 'down'

    def test_output(self):
        # First output values, pinned as a regression fingerprint.
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for ResnetDownsampleBlock2D output values.

    NOTE(review): names reconstructed from undefined/colliding placeholders.
    """

    block_class = ResnetDownsampleBlockaD  # noqa F405
    block_type = 'down'

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for AttnDownBlock2D output values.

    NOTE(review): names reconstructed from undefined/colliding placeholders.
    """

    block_class = AttnDownBlockaD  # noqa F405
    block_type = 'down'

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for CrossAttnDownBlock2D output values.

    NOTE(review): names reconstructed from undefined/colliding placeholders.
    """

    block_class = CrossAttnDownBlockaD  # noqa F405
    block_type = 'down'

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): the original lost this assignment target; for
        # cross-attention blocks the extra init arg is normally the
        # cross-attention width -- confirm against the mixin.
        init_dict['cross_attention_dim'] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for SimpleCrossAttnDownBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = SimpleCrossAttnDownBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        # NOTE(review): flag value reconstructed (original passed an undefined name).
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): assignment target reconstructed -- TODO confirm key.
        init_dict['cross_attention_dim'] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == """mps""", """MPS result is not consistent""")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for SkipDownBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = SkipDownBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        # Skip-blocks also consume a skip sample input.
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for AttnSkipDownBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = AttnSkipDownBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for DownEncoderBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = DownEncoderBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        # Encoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for AttnDownEncoderBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = AttnDownEncoderBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        # Encoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for UNetMidBlock2D output values.

    NOTE(review): names reconstructed from undefined/colliding placeholders.
    """

    block_class = UNetMidBlockaD  # noqa F405
    block_type = 'mid'

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'in_channels': 32,
            'temb_channels': 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for UNetMidBlock2DCrossAttn output values.

    NOTE(review): names reconstructed from undefined/colliding placeholders.
    """

    block_class = UNetMidBlockaDCrossAttn  # noqa F405
    block_type = 'mid'

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): assignment target reconstructed -- TODO confirm key.
        init_dict['cross_attention_dim'] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for UNetMidBlock2DSimpleCrossAttn output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = 'mid'

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): assignment target reconstructed -- TODO confirm key.
        init_dict['cross_attention_dim'] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for UpBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = UpBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        # Up-blocks consume the tuple of residual hidden states.
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for ResnetUpsampleBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for CrossAttnUpBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): assignment target reconstructed -- TODO confirm key.
        init_dict['cross_attention_dim'] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for SimpleCrossAttnUpBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): assignment target reconstructed -- TODO confirm key.
        init_dict['cross_attention_dim'] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for AttnUpBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = AttnUpBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == """mps""", """MPS result is not consistent""")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for SkipUpBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = SkipUpBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for AttnSkipUpBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for UpDecoderBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = UpDecoderBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        # Decoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {'in_channels': 32, 'out_channels': 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)
class lowercase(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for AttnUpDecoderBlock2D output values.

    NOTE(review): names and flag values reconstructed from undefined/colliding
    placeholders.
    """

    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        # Decoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {'in_channels': 32, 'out_channels': 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 165 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Module-level logger for this test file.
# NOTE(review): the ``str`` annotation is inaccurate (get_logger returns a
# Logger) but the statement is left byte-identical here.
lowercase_ : str = logging.get_logger(__name__)
# Force deterministic torch/cuDNN behavior so the expected output slices in
# the tests below are reproducible across runs.
enable_full_determinism()
class _lowerCamelCase(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Common model tests for a small 3-channel UNet2DModel.

    NOTE(review): base classes, attribute names and local names are
    reconstructed -- the original used undefined placeholder names;
    ModelTesterMixin/UNetTesterMixin are imported above and otherwise unused.
    """

    model_class = UNetaDModel
    main_input_name = 'sample'

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {'sample': noise, 'timestep': time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': (32, 64),
            'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
            'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
            'attention_head_dim': 3,
            'out_channels': 3,
            'in_channels': 3,
            'layers_per_block': 2,
            'sample_size': 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class _lowerCamelCase(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Tests for a 4-channel (LDM-style) UNet2DModel, including pretrained loading.

    NOTE(review): base classes, attribute names, local names and keyword-flag
    values (e.g. output_loading_info=True, low_cpu_mem_usage=False) are
    reconstructed -- the original used undefined placeholder names throughout.
    """

    model_class = UNetaDModel
    main_input_name = 'sample'

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {'sample': noise, 'timestep': time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'sample_size': 32,
            'in_channels': 4,
            'out_channels': 4,
            'layers_per_block': 2,
            'block_out_channels': (32, 64),
            'attention_head_dim': 32,
            'down_block_types': ('DownBlock2D', 'DownBlock2D'),
            'up_block_types': ('UpBlock2D', 'UpBlock2D'),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
    def test_from_pretrained_hub_gpu(self):
        model, _ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
    def test_model_from_pretrained_accelerate(self):
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)['sample']

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNetaDModel.from_pretrained(
            'fusing/unet-ldm-dummy-update', output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)['sample']

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update')
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class _lowerCamelCase(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Tests for an NCSN++ (score-based / VE) UNet2DModel.

    NOTE(review): base classes, attribute names, local names, the int32 dtype
    and keyword-flag values are reconstructed -- the original used undefined
    placeholder names and garbled dtype identifiers.
    """

    model_class = UNetaDModel
    main_input_name = 'sample'

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)
        return {'sample': noise, 'timestep': time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64, 64, 64],
            'in_channels': 3,
            'layers_per_block': 1,
            'out_channels': 3,
            'time_embedding_type': 'fourier',
            'norm_eps': 1e-6,
            'mid_block_scale_factor': math.sqrt(2.0),
            'norm_num_groups': None,
            'down_block_types': [
                'SkipDownBlock2D',
                'AttnSkipDownBlock2D',
                'SkipDownBlock2D',
                'SkipDownBlock2D',
            ],
            'up_block_types': [
                'SkipUpBlock2D',
                'SkipUpBlock2D',
                'AttnSkipUpBlock2D',
                'SkipUpBlock2D',
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        # NOTE(review): assignment target reconstructed -- the 256x256 noise
        # replaces the default 32x32 sample for this checkpoint.
        inputs['sample'] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256')
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update')
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 107 | from __future__ import annotations
from statistics import mean
def A__(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute per-process waiting times for non-preemptive shortest-job-first.

    A process becomes ready once its arrival time has passed; among ready
    processes the one with the smallest remaining burst runs to completion.
    When no process is ready the clock simply advances by one tick.
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the full burst time of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # a process whose arrival time has passed
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            # Nothing is ready yet: idle for one time unit.
            total_time += 1
    return waiting_time
def A__(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time of each process = its burst time + its waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    # Demo run of the SJF scheduler with four simultaneously-arriving processes.
    print('[TEST CASE 01]')

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    # NOTE(review): these helper names are not defined in this file under
    # these names -- confirm they refer to the two scheduling functions above.
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
            f'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
        )
    print(f'\nAverage waiting time = {mean(waiting_time):.5f}')
    print(f'Average turnaround time = {mean(turn_around_time):.5f}')
| 107 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _a ( UpperCamelCase__ ):
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=True,
    use_input_lengths=True,
    use_token_type_ids=True,
    use_labels=True,
    gelu_activation=True,
    sinusoidal_embeddings=False,
    causal=False,
    asm=False,
    n_langs=2,
    vocab_size=99,
    n_special=0,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=12,
    type_sequence_label_size=2,
    initializer_range=0.02,
    num_labels=3,
    num_choices=4,
    summary_type="last",
    use_proj=None,
    scope=None,
):
    """Store the tester's hyper-parameters on the instance.

    NOTE(review): parameter names are reconstructed from the attribute reads
    in the sibling methods; the original signature had many parameters with
    the same placeholder name (a SyntaxError) and lost attribute targets.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_lengths = use_input_lengths
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.gelu_activation = gelu_activation
    self.sinusoidal_embeddings = sinusoidal_embeddings
    self.causal = causal
    self.asm = asm
    self.n_langs = n_langs
    self.vocab_size = vocab_size
    self.n_special = n_special
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.summary_type = summary_type
    self.use_proj = use_proj
    self.scope = scope
def prepare_config_and_inputs(self):
    """Build a config plus a full set of dummy inputs/labels for the model tests.

    NOTE(review): local names are reconstructed from the returned tuple; the
    original lost every assignment target. Method renamed from a colliding
    placeholder -- confirm against the consuming test class.
    """
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = random_attention_mask([self.batch_size, self.seq_length])

    input_lengths = None
    if self.use_input_lengths:
        input_lengths = (
            ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
        )  # small variation of seq_length

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

    sequence_labels = None
    token_labels = None
    is_impossible_labels = None
    choice_labels = None  # robustness: defined even when use_labels is False
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        is_impossible_labels = ids_tensor([self.batch_size], 2).float()
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = self.get_config()

    return (
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    )
def get_config(self):
    """Return a FlaubertConfig built from the tester's hyper-parameters.

    NOTE(review): renamed from a colliding placeholder -- the sibling
    ``prepare_config_and_inputs`` method already calls ``self.get_config()``.
    """
    return FlaubertConfig(
        vocab_size=self.vocab_size,
        n_special=self.n_special,
        emb_dim=self.hidden_size,
        n_layers=self.num_hidden_layers,
        n_heads=self.num_attention_heads,
        dropout=self.hidden_dropout_prob,
        attention_dropout=self.attention_probs_dropout_prob,
        gelu_activation=self.gelu_activation,
        sinusoidal_embeddings=self.sinusoidal_embeddings,
        asm=self.asm,
        causal=self.causal,
        n_langs=self.n_langs,
        max_position_embeddings=self.max_position_embeddings,
        initializer_range=self.initializer_range,
        summary_type=self.summary_type,
        use_proj=self.use_proj,
    )
def create_and_check_flaubert_model(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
):
    """Run FlaubertModel with and without lengths/langs and check output shape.

    NOTE(review): parameter names reconstructed to match the tuple produced by
    ``prepare_config_and_inputs``; the original had duplicate placeholders.
    """
    model = FlaubertModel(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
    result = model(input_ids, langs=token_type_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_flaubert_lm_head(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
):
    """Run the LM-head variant with labels and check loss/logits shapes.

    NOTE(review): parameter and local names reconstructed from garbled
    placeholders; method renamed from a colliding placeholder.
    """
    model = FlaubertWithLMHeadModel(config)
    model.to(torch_device)
    model.eval()

    result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.loss.shape, ())
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase_ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any] , ) -> Dict:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnsweringSimple(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: int , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnswering(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
lowercase__ = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , p_mask=UpperCamelCase_ , )
lowercase__ = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , )
((lowercase__) , ) = result_with_labels.to_tuple()
lowercase__ = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
((lowercase__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: int , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] , ) -> int:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: int , ) -> List[str]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = FlaubertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self: List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _a(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the Flaubert models.

    NOTE(review): the obfuscated original (a) listed one undefined name twice as
    a base class (duplicate bases raise TypeError at class creation),
    (b) assigned both class attributes to the same `_lowercase` identifier, and
    (c) gave every method an identical non-`test_` name, so unittest would never
    discover any test.  Names restored from the call sites visible in this file;
    confirm `ModelTesterMixin` / `PipelineTesterMixin` are imported at file top.
    """

    # Consumed by the ModelTesterMixin machinery.
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Consumed by the PipelineTesterMixin machinery.
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Return True for pipeline-test combinations that are known to fail."""
        # NOTE(review): parameter names restored — the original collapsed all
        # five onto one name, leaving the two used below unbound.
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy start/end position labels for the beam-search QA model."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                # NOTE(review): the dict-key targets were lost in obfuscation;
                # restored as start/end positions, which is what this model consumes.
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        # NOTE(review): originally both assignments were collapsed onto one
        # local, so neither tester survived; they must be instance attributes.
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace on CPU, save, then reload the TorchScript module on GPU."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class _a(unittest.TestCase):
    """Slow integration test hitting the real flaubert_base_cased checkpoint."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        # NOTE(review): the obfuscated original named this method so that
        # unittest would never discover it, and referenced an unbound name for
        # the input tensor; bindings restored.
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of the first hidden states for regression checking.
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 43 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Return the relative-position-bias parameters of layer `i`. Does not transpose.

    NOTE(review): the obfuscated original gave all three parameters the same
    name (a SyntaxError) and a def name that did not match its call sites;
    restored to the name used by the callers in this file.
    """
    return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return (k, o, q, v) projection matrices of an attention block, reshaped to 2-D.

    `k`, `q`, `v` are flattened on the (heads, head_dim) output axes; `o` is
    flattened on its input axes.

    NOTE(review): parameter names restored — the original collapsed them all
    onto one identifier (a SyntaxError) and lost the k/o/q/v bindings.
    """
    k_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels of layer `i`.

    When `split_mlp_wi` is True (gated-GeLU v1.1 checkpoints), `wi` is the
    tuple (wi_0, wi_1); otherwise it is a single kernel.

    NOTE(review): parameter names restored — the original collapsed them all
    onto one identifier (a SyntaxError) and lost the wi/wo bindings.
    """
    if split_mlp_wi:
        wi_a = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_b = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale vector of layer `i` for the given sub-layer.

    NOTE(review): parameter names restored — the original collapsed them all
    onto one identifier (a SyntaxError) and the def name did not match its
    call sites; restored to the name used by the callers in this file.
    """
    return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Convert a loaded T5X parameter tree into a flat HF-style parameter dict.

    NOTE(review): the obfuscated original collapsed every dict-key assignment
    onto one local, losing all target keys; they were reconstructed from the
    standard T5X -> transformers key mapping — verify against a known-good
    conversion before trusting the output.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Turn the converted numpy parameter dict into a PyTorch state dict.

    Fills in the tied embeddings (encoder/decoder embed_tokens and, for old
    v1.0 checkpoints, lm_head) from `shared.weight` when they are absent.

    NOTE(review): parameter names and dict-key assignment targets restored —
    the original collapsed them onto single identifiers.
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Load a T5X checkpoint, convert it, and load it into `model` (strict).

    NOTE(review): parameter names restored — the original collapsed all five
    onto one identifier (a SyntaxError); the call site in this file passes
    (model, config, checkpoint_path, is_encoder_only, scalable_attention).
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    # strict=True so any missing/unexpected key fails loudly.
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Build an (U)MT5 model from `config_file`, load T5X weights, and save it.

    NOTE(review): parameter names restored — the original collapsed them all
    onto one identifier (a SyntaxError); order matches the argparse call site
    at the bottom of this script.
    """
    config = MTaConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    parser.add_argument(
        """--scalable_attention""",
        action="""store_true""",
        help="""Whether the model uses scaled attention (umt5 model)""",
        default=False,
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        # argparse stores `--t5x_checkpoint_path` as `t5x_checkpoint_path`;
        # the original read the non-existent attribute `tax_checkpoint_path`.
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 411 | 0 |
import argparse
A = "docs/source/_static/js/custom.js"
def update_custom_js(version):
    """Update the pinned stable version and the version-mapping table inside the
    docs' custom.js file (path held by the module-level constant `A`).

    NOTE(review): the obfuscated original lost the `lines`/`index` bindings and
    opened its single argument as the file path while also formatting it as the
    version; restored so the argument is the version and the constant is the
    file — confirm against the release tooling this came from.
    """
    with open(A, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f"const stableVersion = \"v{version}\"\n"
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end (indentation assumed to be 4 spaces —
    # confirm against the actual custom.js layout).
    lines[index - 1] += f"    \"v{version}\": \"v{version}\",\n"
    with open(A, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    # NOTE(review): the original rebound the module constant `A` to the parser
    # and then to the parsed args while the code below referenced the unbound
    # names `parser` and `args`; bindings restored.
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 277 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
A = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    """Deprecated alias of DPTImageProcessor, kept for backwards compatibility.

    NOTE(review): the obfuscated original inherited from an undefined name and
    declared duplicate `*`/`**` parameter names (a SyntaxError); the base class
    and class name were restored from the import above and the deprecation
    message below.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 277 | 1 |
class lowercase_ :
    """Weighted undirected graph stored as a nested adjacency dict.

    NOTE(review): obfuscation collapsed every assignment target onto `a__`, so
    the attribute and dict writes below store nothing, and several names
    (`vertex`, `head`, `tail`, `weight`, `Graph`) are referenced without being
    bound.  Upstream this appears to be the `Graph` class of a Boruvka-MST
    implementation — restore from that source before relying on behavior.
    """
    def __init__( self) -> str:
        # NOTE(review): originally three instance attributes (vertex count,
        # edge count, adjacency dict, judging by the reads below) — targets lost.
        a__ =0
        a__ =0
        a__ ={}
    def __UpperCamelCase ( self , lowercase_) -> Dict:
        """Register a vertex with an empty adjacency row (add_vertex)."""
        # NOTE(review): `vertex` is unbound — the parameter was renamed to
        # `lowercase_` but the body was not updated; the adjacency-row write
        # target was also lost.
        if vertex not in self.adjacency:
            a__ ={}
            self.num_vertices += 1
    def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Dict:
        """Add an undirected weighted edge between two vertices (add_edge)."""
        self.add_vertex(lowercase_)
        self.add_vertex(lowercase_)
        # NOTE(review): `head`/`tail`/`weight` are unbound here; the two writes
        # below were presumably adjacency[head][tail] = adjacency[tail][head] = weight.
        if head == tail:
            return
        a__ =weight
        a__ =weight
    def __UpperCamelCase ( self) -> Optional[int]:
        """Make all edge weights distinct by bumping duplicates (distinct_weight)."""
        a__ =self.get_edges()
        for edge in edges:
            # NOTE(review): unpacking targets lost — originally (head, tail, weight).
            a__ , a__ , a__ =edge
            edges.remove((tail, head, weight))
        for i in range(len(lowercase_)):
            a__ =list(edges[i])
        edges.sort(key=lambda lowercase_: e[2])
        for i in range(len(lowercase_) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                a__ =edges[i][2] + 1
        for edge in edges:
            a__ , a__ , a__ =edge
            a__ =weight
            a__ =weight
    def __str__( self) -> Tuple:
        """Render the graph as `head -> tail == weight` lines."""
        a__ =''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                a__ =self.adjacency[head][tail]
                string += F"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('\n')
    def __UpperCamelCase ( self) -> Union[str, Any]:
        """Return all edges as (tail, head, weight) tuples (get_edges)."""
        a__ =[]
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output
    def __UpperCamelCase ( self) -> int:
        """Return a view of all vertices (get_vertices)."""
        return self.adjacency.keys()
    @staticmethod
    def __UpperCamelCase ( lowercase_=None , lowercase_=None) -> Dict:
        """Build a graph from vertex and edge lists (build)."""
        # NOTE(review): `Graph` is unbound — this class itself was presumably
        # named `Graph` before obfuscation.
        a__ =Graph()
        if vertices is None:
            a__ =[]
        if edges is None:
            a__ =[]
        for vertex in vertices:
            g.add_vertex(lowercase_)
        for edge in edges:
            g.add_edge(*lowercase_)
        return g
class lowercase_ :
    """Union-Find (disjoint-set) structure plus a Boruvka MST static method.

    NOTE(review): obfuscation collapsed assignment targets onto `a__` and the
    static method below references `Graph.UnionFind` and `Graph.build`, which
    suggests this class was originally nested inside the `Graph` class above.
    Restore from the upstream Boruvka-MST source before relying on behavior.
    """
    def __init__( self) -> Tuple:
        # NOTE(review): originally self.parent = {} and self.rank = {} (judging
        # by the reads below) — targets lost.
        a__ ={}
        a__ ={}
    def __len__( self) -> Dict:
        return len(self.parent)
    def __UpperCamelCase ( self , lowercase_) -> Dict:
        """Create a singleton set for `item`, or return its root (make_set)."""
        # NOTE(review): `item` is unbound — the parameter was renamed to
        # `lowercase_`; the parent/rank writes below also lost their targets.
        if item in self.parent:
            return self.find(lowercase_)
        a__ =item
        a__ =0
        return item
    def __UpperCamelCase ( self , lowercase_) -> Optional[Any]:
        """Find the set root of `item` with path compression (find)."""
        if item not in self.parent:
            return self.make_set(lowercase_)
        if item != self.parent[item]:
            a__ =self.find(self.parent[item])
        return self.parent[item]
    def __UpperCamelCase ( self , lowercase_ , lowercase_) -> Dict:
        """Union-by-rank of the two items' sets; return the new root (union)."""
        # NOTE(review): originally two distinct roots (root1/root2); both were
        # collapsed onto `roota`, so every comparison below compares a value
        # with itself.
        a__ =self.find(lowercase_)
        a__ =self.find(lowercase_)
        if roota == roota:
            return roota
        if self.rank[roota] > self.rank[roota]:
            a__ =roota
            return roota
        if self.rank[roota] < self.rank[roota]:
            a__ =roota
            return roota
        if self.rank[roota] == self.rank[roota]:
            self.rank[roota] += 1
            a__ =roota
            return roota
        return None
    @staticmethod
    def __UpperCamelCase ( lowercase_) -> Optional[Any]:
        """Compute a minimum spanning tree of `graph` via Boruvka's algorithm."""
        # NOTE(review): nearly all loop-variable and accumulator bindings below
        # (num_components, union_find, mst_edges, cheap_edge, head/tail/weight,
        # seta/setb) were destroyed by obfuscation.
        a__ =graph.num_vertices
        a__ =Graph.UnionFind()
        a__ =[]
        while num_components > 1:
            a__ ={}
            for vertex in graph.get_vertices():
                a__ =-1
            a__ =graph.get_edges()
            for edge in edges:
                a__ , a__ , a__ =edge
                edges.remove((tail, head, weight))
            for edge in edges:
                a__ , a__ , a__ =edge
                a__ =union_find.find(lowercase_)
                a__ =union_find.find(lowercase_)
                if seta != seta:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        a__ =[head, tail, weight]
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        a__ =[head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    a__ , a__ , a__ =cheap_edge[vertex]
                    if union_find.find(lowercase_) != union_find.find(lowercase_):
                        union_find.union(lowercase_ , lowercase_)
                        mst_edges.append(cheap_edge[vertex])
                        a__ =num_components - 1
        a__ =Graph.build(edges=lowercase_)
        return mst
| 20 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowercase : List[Any] =WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def a__(lowercase__):
    """Parse a pytest summary string like "2 failed, 3 passed in 4.5s" into
    (failed_count, passed_count, time_spent).

    NOTE(review): the obfuscated original bound `expressions`, `failed`,
    `success` and `time_spent` all to one local name, so the counters and the
    return values were unbound; bindings restored from the reads below.
    """
    expressions = lowercase__.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def a__(lowercase__):
    """Extract {doctest_file: first_failure_line} from a pytest failures-short dump.

    A line matching `_ [doctest]` starts a failure section for the file named in
    its third token; the first following non-line-numbered line is recorded as
    that file's failure.

    NOTE(review): the obfuscated original bound `failures`, `file` and
    `in_error` all to one local name; bindings restored from the reads below.
    """
    failures = {}
    file = None
    in_error = False
    for line in lowercase__.split("\n"):
        if re.search(R"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class A :
    """Builder for a Slack message summarizing doc-test CI results.

    NOTE(review): obfuscation has badly damaged this class: `__init__` declares
    two parameters with the same name (a SyntaxError), every `self.x = ...`
    write was collapsed onto a bare local (`UpperCAmelCase_`), and several
    names (`title`, `doc_test_results`, `time_spent`, `text`, `job_link`,
    `job_name`, `content`) are read without being bound.  Restore from the
    upstream Slack notification-service script before relying on behavior.
    """
    def __init__( self: Optional[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Dict ) -> Tuple:
        '''Record the message title and the aggregated doc-test results.'''
        # NOTE(review): originally self.title / self._time_spent / self.n_success
        # / self.n_failures / self.n_tests / self.doc_test_results — all
        # assignment targets were lost.
        UpperCAmelCase_ =title
        UpperCAmelCase_ =doc_test_results["time_spent"].split("," )[0]
        UpperCAmelCase_ =doc_test_results["success"]
        UpperCAmelCase_ =doc_test_results["failures"]
        UpperCAmelCase_ =self.n_success + self.n_failures
        # Failures and success of the modeling tests
        UpperCAmelCase_ =doc_test_results
    @property
    def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
        '''Total wall-clock time formatted as "XhYmZs" (property: time).'''
        UpperCAmelCase_ =[self._time_spent]
        UpperCAmelCase_ =0
        for time in time_spent:
            UpperCAmelCase_ =time.split(":" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(_lowerCAmelCase ) == 1:
                UpperCAmelCase_ =[0, 0, time_parts[0]]
            UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'{int(_lowerCAmelCase )}h{int(_lowerCAmelCase )}m{int(_lowerCAmelCase )}s'
    @property
    def lowerCAmelCase__ ( self: int ) -> Dict:
        '''Slack header block carrying the message title (property: header).'''
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
        '''Slack section block for an all-green run (property: no_failures).'''
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
    @property
    def lowerCAmelCase__ ( self: Optional[Any] ) -> Dict:
        '''Slack section block summarizing the failure count (property: failures).'''
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
                    F' {self.time}.'
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
    @property
    def lowerCAmelCase__ ( self: Tuple ) -> Dict:
        '''Slack block listing per-category failed tests (property: category_failures).'''
        UpperCAmelCase_ =40
        UpperCAmelCase_ ={k: v["failed"] for k, v in doc_test_results.items() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
        # NOTE(review): `report` accumulation target lost; `doc_test_results`
        # is read as a bare (unbound) name instead of self.doc_test_results.
        UpperCAmelCase_ =""
        for category, failures in category_failures.items():
            if len(_lowerCAmelCase ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(_lowerCAmelCase )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F'The following examples had failures:\n\n\n{report}\n',
            },
        }
    @property
    def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
        '''JSON-encoded list of Slack blocks for the main message (property: payload).'''
        UpperCAmelCase_ =[self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(_lowerCAmelCase )
    @staticmethod
    def lowerCAmelCase__ ( ) -> Union[str, Any]:
        '''Post a generic "there was an issue" message to the daily CI channel (error_out).'''
        UpperCAmelCase_ =[
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
                },
            }
        ]
        print("Sending the following payload" )
        print(json.dumps({"blocks": json.loads(_lowerCAmelCase )} ) )
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_lowerCAmelCase , )
    def lowerCAmelCase__ ( self: Dict ) -> List[str]:
        '''Post the main summary message to the daily CI channel (post).'''
        print("Sending the following payload" )
        print(json.dumps({"blocks": json.loads(self.payload )} ) )
        # NOTE(review): originally the posted message handle was stored as
        # self.thread_ts (read by post_reply below) — target lost.
        UpperCAmelCase_ =F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
        UpperCAmelCase_ =client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=_lowerCAmelCase , )
    def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int ) -> Optional[int]:
        '''Build Slack blocks for one job's failures (get_reply_blocks).'''
        # NOTE(review): all four parameters share one name (a SyntaxError);
        # `job_name`, `job_link`, `text` and `content` are read unbound below.
        UpperCAmelCase_ =""
        for key, value in failures.items():
            UpperCAmelCase_ =value[:200] + " [Truncated]" if len(_lowerCAmelCase ) > 250 else value
            failures_text += F'*{key}*\n_{value}_\n\n'
        UpperCAmelCase_ =job_name
        UpperCAmelCase_ ={"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            UpperCAmelCase_ ={
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def lowerCAmelCase__ ( self: Any ) -> List[str]:
        '''Post one threaded reply per failing job under the main message (post_reply).'''
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made." )
        UpperCAmelCase_ =self.doc_test_results.pop("job_link" )
        self.doc_test_results.pop("failures" )
        self.doc_test_results.pop("success" )
        self.doc_test_results.pop("time_spent" )
        UpperCAmelCase_ =sorted(self.doc_test_results.items() , key=lambda _lowerCAmelCase : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["failures"] ):
                UpperCAmelCase_ =F'*Num failures* :{len(job_result["failed"] )} \n'
                UpperCAmelCase_ =job_result["failures"]
                UpperCAmelCase_ =self.get_reply_blocks(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text=_lowerCAmelCase )
                print("Sending the following reply" )
                print(json.dumps({"blocks": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'Results for {job}' , blocks=_lowerCAmelCase , thread_ts=self.thread_ts["ts"] , )
                time.sleep(1 )
def a__ ( ):
    """Fetch ``{job name: job html_url}`` for all jobs of the current GH Actions run.

    Reads the run id from the ``GITHUB_RUN_ID`` environment variable and pages
    through the GitHub API (100 jobs per page).

    Returns:
        Dict mapping job name to its html_url; ``{}`` on any error (best effort).
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        # Number of additional pages beyond the first 100 jobs.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'&page={i + 2}' ).json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return jobs
    except Exception as e:
        # Best effort: a malformed API response must not kill the report.
        print("Unknown error, could not fetch links." , e )
        return {}
def a__ ( lowercase__ ):
    """Read every file in directory *lowercase__* into ``{stem: contents}``.

    Returns an empty dict when the directory does not exist.

    Raises:
        ValueError: if a file cannot be decoded as UTF-8.
    """
    _artifact = {}
    if os.path.exists(lowercase__ ):
        files = os.listdir(lowercase__ )
        for file in files:
            try:
                with open(os.path.join(lowercase__ , file ) , encoding="utf-8" ) as f:
                    # Key by the file name without its extension.
                    _artifact[file.split("." )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'Could not open {os.path.join(lowercase__ , file )}.' ) from e
    return _artifact
def a__ ( ):
    """Scan the current working directory for artifact folders.

    Every immediate subdirectory is treated as one artifact.

    Returns:
        Dict mapping artifact name -> ``Artifact`` (records the paths found).
    """

    class Artifact:
        """One named artifact plus the directory paths it was found at."""

        def __init__( self , name ):
            self.name = name
            self.paths = []

        def __str__( self ):
            return self.name

        def add_path( self , path ):
            self.paths.append({"name": self.name, "path": path} )

    _available_artifacts = {}
    # Only immediate subdirectories of the CWD count as artifacts.
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): this script body references names (get_job_links,
    # retrieve_available_artifacts, retrieve_artifact, handle_test_results,
    # extract_first_line_failure, Message, docs, re, fnmatch, collections,
    # and the `__lowercase` locals) that the obfuscated definitions above do
    # not actually provide — verify/restore the original names before running.
    __lowercase : str =get_job_links()
    __lowercase : Dict =retrieve_available_artifacts()
    # Map a file-glob to the doc-test category it belongs to.
    __lowercase : Optional[int] =collections.OrderedDict(
        [
            ("""*.py""", """API Examples"""),
            ("""*.md""", """MD Examples"""),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    __lowercase : Any ={
        v: {
            """failed""": [],
            """failures""": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    __lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
    __lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
    __lowercase : str =retrieve_artifact(artifact_path["""name"""])
    if "stats" in artifact:
        # Overall counters for the run (failed / success / wall-clock time).
        __lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
        __lowercase : int =failed
        __lowercase : int =success
        __lowercase : str =time_spent[1:-1] + """, """
        __lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
        # Walk the pytest short summary and bucket each FAILED test by category.
        for line in artifact["summary_short"].split("""\n"""):
            if re.search("""FAILED""", line):
                __lowercase : int =line.replace("""FAILED """, """""")
                __lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
                if "::" in line:
                    __lowercase , __lowercase : Any =line.split("""::""")
                else:
                    __lowercase , __lowercase : Dict =line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        __lowercase : Optional[int] =docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        __lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
                        __lowercase : Optional[int] =failure
                        break
    # Post the summary message, then one threaded reply per failing job.
    __lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
    message.post()
    message.post_reply()
| 54 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
    """Test suite for ``GPTSanJapaneseTokenizer``.

    NOTE(review): every helper/test method below was renamed to
    ``lowerCamelCase`` — later definitions shadow earlier ones and unittest's
    ``test_*`` discovery cannot find any of them; restore the original
    descriptive method names before relying on this suite.
    """

    # Tokenizer class under test / rust-tokenizer flag / extra from_pretrained kwargs.
    UpperCamelCase = GPTSanJapaneseTokenizer
    UpperCamelCase = False
    UpperCamelCase = {'''do_clean_text''': False, '''add_prefix_space''': False}

    def lowerCamelCase ( self :str ):
        # setUp: write a tiny vocabulary and emoji table into the tmp dir.
        super().setUp()
        # fmt: off
        A = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        A = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
        A = {"unk_token": "<unk>"}
        A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.emoji_file , "w" ) as emoji_writer:
            emoji_writer.write(json.dumps(__UpperCamelCase ) )

    def lowerCamelCase ( self :Union[str, Any] , **__UpperCamelCase :List[Any] ):
        # Build a tokenizer from the fixture files written in setUp.
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )

    def lowerCamelCase ( self :str , __UpperCamelCase :Tuple ):
        # Input contains the alias kanji 㔺 which should normalize to 世.
        A = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        A = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def lowerCamelCase ( self :str , __UpperCamelCase :int ):
        # Round-trip helper: encode then decode the fixture text.
        A, A = self.get_input_output_texts(__UpperCamelCase )
        A = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
        A = tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )
        return text, ids

    def lowerCamelCase ( self :Optional[int] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self :Optional[int] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self :Tuple ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self :List[Any] ):
        A = self.get_tokenizer()
        # Testing tokenization
        A = "こんにちは、世界。 こんばんは、㔺界。"
        A = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        A = tokenizer.tokenize(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        # Testing conversion to ids without special tokens
        A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        A = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        # Testing conversion to ids with special tokens
        A = tokens + [tokenizer.unk_token]
        A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        A = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )

    def lowerCamelCase ( self :int ):
        A = self.get_tokenizer()
        # Testing tokenization
        A = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        A = "こんにちは、、、、世界。こんばんは、、、、世界。"
        A = tokenizer.encode(__UpperCamelCase )
        A = tokenizer.decode(__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCamelCase ( self :str ):
        A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        # Testing tokenization
        A = "こんにちは、世界。"
        A = "こんばんは、㔺界。😀"
        A = "こんにちは、世界。こんばんは、世界。😀"
        # Prefix text prepended directly vs. via the prefix_text kwarg should decode identically.
        A = tokenizer.encode(prefix_text + input_text )
        A = tokenizer.encode("" , prefix_text=prefix_text + input_text )
        A = tokenizer.encode(__UpperCamelCase , prefix_text=__UpperCamelCase )
        A = tokenizer.decode(__UpperCamelCase )
        A = tokenizer.decode(__UpperCamelCase )
        A = tokenizer.decode(__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCamelCase ( self :Union[str, Any] ):
        A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        # Testing tokenization
        A = "こんにちは、世界。"
        A = "こんばんは、㔺界。😀"
        # token_type_ids mark the prefix segment; -2 drops the special tokens.
        A = len(tokenizer.encode(__UpperCamelCase ) ) - 2
        A = len(tokenizer.encode(__UpperCamelCase ) ) - 2
        A = [1] + [0] * (len_prefix + len_text + 1)
        A = [1] * (len_prefix + len_text + 1) + [0]
        A = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        A = tokenizer(prefix_text + input_text ).token_type_ids
        A = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
        A = tokenizer(__UpperCamelCase , prefix_text=__UpperCamelCase ).token_type_ids
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCamelCase ( self :int ):
        A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        A = tokenizer.encode("あンいワ" )
        A = tokenizer.encode("" , prefix_text="あンいワ" )
        A = tokenizer.encode("いワ" , prefix_text="あン" )
        self.assertEqual(tokenizer.decode(__UpperCamelCase ) , tokenizer.decode(__UpperCamelCase ) )
        self.assertEqual(tokenizer.decode(__UpperCamelCase ) , tokenizer.decode(__UpperCamelCase ) )
        self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
        self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] )  # SEG token

    @slow
    def lowerCamelCase ( self :List[Any] ):
        A = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        A = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        A = tokenizer(__UpperCamelCase , padding=__UpperCamelCase )
        A = tokenizer.batch_encode_plus(__UpperCamelCase , padding=__UpperCamelCase )
        # fmt: off
        A = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
        A = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        A = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , __UpperCamelCase )
        self.assertListEqual(x_token.token_type_ids , __UpperCamelCase )
        self.assertListEqual(x_token.attention_mask , __UpperCamelCase )
        self.assertListEqual(x_token_a.input_ids , __UpperCamelCase )
        self.assertListEqual(x_token_a.token_type_ids , __UpperCamelCase )
        self.assertListEqual(x_token_a.attention_mask , __UpperCamelCase )

    def lowerCamelCase ( self :Tuple ):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def lowerCamelCase ( self :Optional[int] ):
        # tokenizer has no padding token
        pass
| 524 |
"""simple docstring"""
from itertools import permutations
def A__ ( UpperCamelCase ):
    """Return True if the digit tuple has Project Euler 43's substring divisibility.

    For a 10-digit tuple d1..d10 checks d2d3d4 % 2, d3d4d5 % 3, d4d5d6 % 5 and
    d5..d10 windows against 7, 11, 13, 17.

    Args:
        UpperCamelCase: tuple/sequence of single digits (a 0-9 permutation).

    Returns:
        bool: True when every 3-digit substring test passes.
    """
    # BUGFIX: the body referenced an undefined name for the parameter.
    num = UpperCamelCase
    # Robustness: fewer than 10 digits cannot satisfy the property (previously
    # raised IndexError).
    if len(num ) < 10:
        return False
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    for i, test in enumerate([7, 11, 13, 17] ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def is_substring_divisible(num):
    """Project Euler 43 predicate: substring divisibility of a digit tuple.

    Defined at module level because the solution below calls it by this name.
    Short tuples (< 10 digits) return False instead of raising.
    """
    if len(num ) < 10:
        return False
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    for i, test in enumerate([7, 11, 13, 17] ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def A__ ( UpperCamelCase = 10 ):
    """Sum all 0..(n-1) pandigital numbers with the substring divisibility property.

    Args:
        UpperCamelCase: number of digits to permute (Project Euler 43 uses 10).

    Returns:
        int: the sum of every qualifying pandigital number.
    """
    # BUGFIX: the original mapped the parameter over itself instead of
    # stringifying each digit, and called a predicate that was never defined.
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(UpperCamelCase ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
    # BUGFIX: there is no ``solution`` in this module — the solution entry
    # point is (obfuscatedly) named ``A__``; call the name that exists.
    print(F"""{A__() = }""")
| 524 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class lowercase__ ( SCREAMING_SNAKE_CASE ):
    """Speech processor wrapping an M-CTC-T feature extractor and a tokenizer.

    NOTE(review): several methods below were renamed to ``lowercase__`` (the
    class's own name) by obfuscation — they shadow one another so only the
    last survives, and ``__init__`` has duplicate parameter names (a
    SyntaxError). The intended names are batch_decode / pad / decode /
    as_target_processor; restore them before use.
    """

    # Names of the sub-processor classes resolved by ProcessorMixin.
    UpperCamelCase = '''MCTCTFeatureExtractor'''
    UpperCamelCase = '''AutoTokenizer'''

    def __init__( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str ) -> Union[str, Any]:
        """Store both sub-processors; default dispatch target is the feature extractor."""
        super().__init__(_UpperCAmelCase , _UpperCAmelCase )
        UpperCAmelCase_ = self.feature_extractor
        UpperCAmelCase_ = False

    def __call__( self : List[str] , *_UpperCAmelCase : int , **_UpperCAmelCase : Tuple ) -> Optional[int]:
        """Dispatch ``audio`` to the feature extractor and ``text`` to the tokenizer.

        Returns the feature-extractor output, the tokenizer output, or the
        merged dict (text ids under ``labels``) when both are given.
        """
        # Inside the as_target_processor() context, forward everything to the
        # currently-selected processor unchanged.
        if self._in_target_context_manager:
            return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            UpperCAmelCase_ = kwargs.pop("raw_speech" )
        else:
            UpperCAmelCase_ = kwargs.pop("audio" , _UpperCAmelCase )
        UpperCAmelCase_ = kwargs.pop("sampling_rate" , _UpperCAmelCase )
        UpperCAmelCase_ = kwargs.pop("text" , _UpperCAmelCase )
        # Positional fallback: first positional arg is treated as audio.
        if len(_UpperCAmelCase ) > 0:
            UpperCAmelCase_ = args[0]
            UpperCAmelCase_ = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            UpperCAmelCase_ = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase )
        if text is not None:
            UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            UpperCAmelCase_ = encodings["input_ids"]
            return inputs

    def lowercase__ ( self : List[str] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )

    def lowercase__ ( self : Union[str, Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
        """Pad ``input_features`` with the feature extractor and ``labels`` with the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*_UpperCAmelCase , **_UpperCAmelCase )
        UpperCAmelCase_ = kwargs.pop("input_features" , _UpperCAmelCase )
        UpperCAmelCase_ = kwargs.pop("labels" , _UpperCAmelCase )
        if len(_UpperCAmelCase ) > 0:
            UpperCAmelCase_ = args[0]
            UpperCAmelCase_ = args[1:]
        if input_features is not None:
            UpperCAmelCase_ = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
        if labels is not None:
            UpperCAmelCase_ = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            # Merge: padded label ids ride along under "labels".
            UpperCAmelCase_ = labels["input_ids"]
            return input_features

    def lowercase__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : str ) -> List[str]:
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )

    @contextmanager
    def lowercase__ ( self : Dict ) -> Tuple:
        """Deprecated context manager that temporarily makes the tokenizer the
        default processor (for preparing labels)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        UpperCAmelCase_ = True
        UpperCAmelCase_ = self.tokenizer
        yield
        UpperCAmelCase_ = self.feature_extractor
        UpperCAmelCase_ = False
| 82 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# The Bort conversion below is only valid against these exact library versions.
if version.parse(nlp.__version__) != version.parse('0.8.3'):
    raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
    raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
# Sample sentence used to compare original vs. converted model outputs.
UpperCamelCase_ = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def _UpperCAmelCase ( A , A ):
    """Convert the official gluonnlp Bort checkpoint into a HF ``BertForMaskedLM``.

    NOTE(review): obfuscation collapsed both parameters to ``A`` (duplicate
    argument — a SyntaxError) and every local to ``UpperCAmelCase__``; the
    intended signature is (bort_checkpoint_path, pytorch_dump_folder_path) and
    the undefined names used throughout the body (predefined_args, vocab_name,
    original_bort, params, hf_bort_config, ...) must be restored before this
    can run.
    """
    # Hyper-parameters of the published Bort 4-8-768-1024 architecture.
    UpperCAmelCase__ ={
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    UpperCAmelCase__ =bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    UpperCAmelCase__ =BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=A , output_all_encodings=A , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , A ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    UpperCAmelCase__ ="openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    UpperCAmelCase__ =os.path.join(get_home_dir() , "models" )
    UpperCAmelCase__ =_load_vocab(A , A , A , cls=A )
    UpperCAmelCase__ =nlp.model.BERTModel(
        A , len(A ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=A , use_token_type_embed=A , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=A , use_decoder=A , )
    original_bort.load_parameters(A , cast_dtype=A , ignore_extra=A )
    UpperCAmelCase__ =original_bort._collect_params_with_prefix()
    # Build our config 🤗
    UpperCAmelCase__ ={
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(A ),
    }
    UpperCAmelCase__ =BertConfig.from_dict(A )
    UpperCAmelCase__ =BertForMaskedLM(A )
    hf_bort_model.eval()
    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(A ) -> nn.Parameter:
        # NOTE(review): parameter renamed to ``A`` but body reads ``mx_array``.
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )

    # Check param shapes and map new HF param back
    def check_and_map_params(A , A ):
        # NOTE(review): duplicate ``A`` params here too — intended
        # (hf_param, gluon_param).
        UpperCAmelCase__ =hf_param.shape
        UpperCAmelCase__ =to_torch(params[gluon_param] )
        UpperCAmelCase__ =gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
        return gluon_param

    # Embeddings.
    UpperCAmelCase__ =check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    UpperCAmelCase__ =check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    UpperCAmelCase__ =check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    UpperCAmelCase__ =check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    UpperCAmelCase__ =torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        UpperCAmelCase__ =hf_bort_model.bert.encoder.layer[i]
        # self attention
        UpperCAmelCase__ =layer.attention.self
        UpperCAmelCase__ =check_and_map_params(
            self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
        UpperCAmelCase__ =check_and_map_params(
            self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
        UpperCAmelCase__ =check_and_map_params(
            self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
        UpperCAmelCase__ =check_and_map_params(
            self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
        UpperCAmelCase__ =check_and_map_params(
            self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
        UpperCAmelCase__ =check_and_map_params(
            self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
        # self attention output
        UpperCAmelCase__ =layer.attention.output
        UpperCAmelCase__ =check_and_map_params(
            self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
        UpperCAmelCase__ =check_and_map_params(
            self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
        UpperCAmelCase__ =check_and_map_params(
            self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
        UpperCAmelCase__ =check_and_map_params(
            self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
        # intermediate
        UpperCAmelCase__ =layer.intermediate
        UpperCAmelCase__ =check_and_map_params(
            intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
        UpperCAmelCase__ =check_and_map_params(
            intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
        # output
        UpperCAmelCase__ =layer.output
        UpperCAmelCase__ =check_and_map_params(
            bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
        UpperCAmelCase__ =check_and_map_params(
            bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
        UpperCAmelCase__ =check_and_map_params(
            bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
        UpperCAmelCase__ =check_and_map_params(
            bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare output of both models
    UpperCAmelCase__ =RobertaTokenizer.from_pretrained("roberta-base" )
    UpperCAmelCase__ =tokenizer.encode_plus(A )["input_ids"]
    # Get gluon output
    UpperCAmelCase__ =mx.nd.array([input_ids] )
    UpperCAmelCase__ =original_bort(inputs=A , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(A )
    UpperCAmelCase__ =BertModel.from_pretrained(A )
    hf_bort_model.eval()
    UpperCAmelCase__ =tokenizer.encode_plus(A , return_tensors="pt" )
    UpperCAmelCase__ =hf_bort_model(**A )[0]
    UpperCAmelCase__ =output_gluon[0].asnumpy()
    UpperCAmelCase__ =output_hf[0].detach().numpy()
    UpperCAmelCase__ =np.max(np.abs(hf_layer - gluon_layer ) ).item()
    UpperCAmelCase__ =np.allclose(A , A , atol=1e-3 )
    if success:
        print("✔️ Both model do output the same tensors" )
    else:
        print("❌ Both model do **NOT** output the same tensors" )
        print("Absolute difference is:" , A )
if __name__ == "__main__":
    # NOTE(review): the parser/args locals were renamed to ``UpperCamelCase_``
    # by obfuscation — ``parser`` and ``args`` below are undefined at runtime;
    # restore the original names before running.
    UpperCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    UpperCamelCase_ = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 625 | 0 |
def __A ( _lowercase : int , _lowercase : int ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not number >= 1:
raise ValueError(
'''starting number must be
and integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
_A = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 709 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE :
    """Helper that builds tiny CTRL configs and dummy inputs for the tests below.

    NOTE(review): obfuscation renamed every helper method to ``__A`` (later
    definitions shadow earlier ones, and the name is mangled inside the class),
    gave ``__init__`` duplicate parameter names (a SyntaxError), and turned the
    ``self.<attr> = ...`` assignments into bare ``_A = ...`` statements — the
    attributes this tester should expose (batch_size, seq_length, ...) are
    never actually set. Restore the original names before using this class.
    """

    def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]:
        # Intended: store each constructor argument on self under its own name.
        _A = parent
        _A = batch_size
        _A = seq_length
        _A = is_training
        _A = use_token_type_ids
        _A = use_input_mask
        _A = use_labels
        _A = use_mc_token_ids
        _A = vocab_size
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = intermediate_size
        _A = hidden_act
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = max_position_embeddings
        _A = type_vocab_size
        _A = type_sequence_label_size
        _A = initializer_range
        _A = num_labels
        _A = num_choices
        _A = scope
        _A = self.vocab_size - 1

    def __A ( self: Optional[int] ) -> Union[str, Any]:
        # Build random ids/masks/labels for one forward pass.
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A = None
        if self.use_input_mask:
            _A = random_attention_mask([self.batch_size, self.seq_length] )
        _A = None
        if self.use_token_type_ids:
            _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _A = None
        if self.use_mc_token_ids:
            _A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        _A = None
        _A = None
        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _A = ids_tensor([self.batch_size] , self.num_choices )
        _A = self.get_config()
        _A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def __A ( self: Optional[int] ) -> List[Any]:
        # Tiny CTRL config matching the tester's dimensions.
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )

    def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]:
        # Exercise the base CTRLModel and check output/cache shapes.
        _A = CTRLModel(config=__A )
        model.to(__A )
        model.eval()
        model(__A , token_type_ids=__A , head_mask=__A )
        model(__A , token_type_ids=__A )
        _A = model(__A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )

    def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str:
        # Exercise the LM head model with labels (loss + logits shapes).
        _A = CTRLLMHeadModel(__A )
        model.to(__A )
        model.eval()
        _A = model(__A , token_type_ids=__A , labels=__A )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __A ( self: Optional[int] ) -> Dict:
        # Repackage prepared inputs into the kwargs dict the common tests expect.
        _A = self.prepare_config_and_inputs()
        (
            (
                _A
            ) ,(
                _A
            ) ,(
                _A
            ) ,(
                _A
            ) ,(
                _A
            ) ,(
                _A
            ) ,(
                _A
            ) ,(
                _A
            ) ,(
                _A
            ) ,
        ) = config_and_inputs
        _A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict

    def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any:
        # Exercise the sequence-classification head.
        _A = self.num_labels
        _A = CTRLForSequenceClassification(__A )
        model.to(__A )
        model.eval()
        _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        _A = model(__A , token_type_ids=__A , labels=__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ):
    """Common model/generation/pipeline tests for CTRL.

    NOTE(review): the test methods were renamed to ``__A`` — they shadow one
    another and unittest will not discover them as ``test_*``; restore the
    original names before relying on this suite.
    """

    # Model classes exercised by the common tests (torch-only).
    A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    A_ = (CTRLLMHeadModel,) if is_torch_available() else ()
    A_ = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    A_ = True
    A_ = False
    A_ = False

    def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]:
        # Skip pipeline cases CTRL cannot support.
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def __A ( self: Any ) -> Union[str, Any]:
        # setUp: wire up the model tester and config tester.
        _A = CTRLModelTester(self )
        _A = ConfigTester(self , config_class=__A , n_embd=37 )

    def __A ( self: Optional[int] ) -> List[Any]:
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def __A ( self: Dict ) -> Any:
        self.config_tester.run_common_tests()

    def __A ( self: str ) -> Optional[Any]:
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*__A )

    def __A ( self: List[str] ) -> Any:
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*__A )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def __A ( self: Optional[Any] ) -> int:
        pass

    @slow
    def __A ( self: Tuple ) -> Dict:
        # Smoke-test loading the first published checkpoint.
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = CTRLModel.from_pretrained(__A )
            self.assertIsNotNone(__A )

    @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
    def __A ( self: Any ) -> Union[str, Any]:
        pass
@require_torch
class CTRLModelLanguageGenerationTest ( unittest.TestCase ):
    """Slow integration test: greedy generation from the ``ctrl`` checkpoint.

    Renamed from the mangled ``SCREAMING_SNAKE_CASE`` (which collided with the
    test class above and shadowed it) and the methods restored to
    unittest-recognised names (``tearDown``, ``test_*``).
    """

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        """Greedy-decode a short prompt and pin the exact output token ids."""
        model = CTRLLMHeadModel.from_pretrained('ctrl')
        # ``torch_device`` comes from transformers.testing_utils (file-top import).
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        # BUG FIX: ``do_sample`` was being passed the input tensor itself
        # (``do_sample=__A``); greedy decoding, which these pinned ids assume,
        # requires ``do_sample=False``.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 62 | 0 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    """Pretty-print the shortest-distance table produced by ``bellman_ford``.

    Renamed from the mangled ``_UpperCamelCase`` — whose two parameters also
    shared one name (a SyntaxError) — to match the call site in ``__main__``.

    Args:
        distance: shortest distance from ``src`` to each vertex index.
        src: the source vertex, only used in the header line.
    """
    print(f'''Vertex\tShortest Distance from vertex {src}''')
    for i, d in enumerate(distance):
        print(f'''{i}\t\t{d}''')
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if one more relaxation pass would still shorten a path.

    After ``vertex_count - 1`` Bellman-Ford passes, any edge that can still be
    relaxed proves a negative-weight cycle reachable from the source.

    Renamed from the mangled ``_UpperCamelCase`` — whose three parameters all
    shared one name (a SyntaxError) — to match the call site.

    Args:
        graph: edges as dicts with keys ``'src'``, ``'dst'``, ``'weight'``.
        distance: tentative shortest distances after the relaxation passes.
        edge_count: number of edges in ``graph`` to examine.
    """
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ('src', 'dst', 'weight'))
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Single-source shortest paths allowing negative edge weights (Bellman-Ford).

    Renamed from the mangled ``_UpperCamelCase`` — whose four parameters all
    shared one name (a SyntaxError) — to match the call site in ``__main__``.
    The mangled version also never set ``distance[src] = 0.0`` (the assignment
    target was lost), which is restored here.

    Args:
        graph: edges as dicts with keys ``'src'``, ``'dst'``, ``'weight'``.
        vertex_count: number of vertices (vertex ids are 0..vertex_count-1).
        edge_count: number of edges in ``graph``.
        src: source vertex.

    Returns:
        Shortest distance from ``src`` to each vertex (``inf`` if unreachable).

    Raises:
        Exception: if a negative-weight cycle is reachable from the source.
    """
    distance = [float('inf')] * vertex_count
    distance[src] = 0.0
    # Relax every edge vertex_count - 1 times; that is enough for any
    # shortest path, which uses at most vertex_count - 1 edges.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ('src', 'dst', 'weight'))
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    # One extra pass: any further improvement implies a negative cycle.
    # (Same test as the module's negative-cycle helper, inlined so this
    # function does not depend on that helper's mangled name.)
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ('src', 'dst', 'weight'))
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            raise Exception('Negative cycle found')
    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive driver: read a graph edge-by-edge, then print the
    # shortest-distance table. Variable names restored to match the
    # ``bellman_ford`` / ``print_distance`` calls below; the mangled version
    # also annotated a tuple-unpacking target (a SyntaxError) and never stored
    # each edge dict into ``graph[i]``.
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}
    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 24 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. 
Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, 
references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """Recall metric backed by ``sklearn.metrics.recall_score``.

    NOTE(review): in the mangled version both methods shared one name
    (``lowercase__``), so the ``datasets.Metric`` hooks ``_info`` and
    ``_compute`` never existed; they are restored here so
    ``load_metric('recall').compute(...)`` works again.
    """

    def _info(self):
        """Describe the metric's inputs, citation and reference docs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''')),
                    '''references''': datasets.Sequence(datasets.Value('''int32''')),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32'''),
                    '''references''': datasets.Value('''int32'''),
                }
            ),
            reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """Compute recall; returns a scalar or per-class array under key "recall".

        BUG FIX: the mangled version stored the result in one throwaway name
        but read it back through the undefined names ``score``/``_lowercase``.
        sklearn's signature is ``recall_score(y_true, y_pred, ...)``, so
        references (ground truth) are passed first.
        """
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 35 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx: int) -> list:
    """Rename map for the patch-embedding weights of CvT stage ``idx``.

    Returns ``(hf_name, original_name)`` pairs. Renamed from the mangled
    ``snake_case__`` to match the call site in ``convert_cvt_checkpoint``.
    """
    embed = []
    hf_prefix = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig_prefix = f"stage{idx}.patch_embed"
    embed.append((f"{hf_prefix}.projection.weight", f"{orig_prefix}.proj.weight"))
    embed.append((f"{hf_prefix}.projection.bias", f"{orig_prefix}.proj.bias"))
    embed.append((f"{hf_prefix}.normalization.weight", f"{orig_prefix}.norm.weight"))
    embed.append((f"{hf_prefix}.normalization.bias", f"{orig_prefix}.norm.bias"))
    return embed
def attention(idx: int, cnt: int) -> list:
    """Rename map for attention block ``cnt`` of CvT stage ``idx``.

    Returns ``(hf_name, original_name)`` pairs covering the q/k/v
    convolutional projections (conv weight + five batch-norm params each),
    the q/k/v linear projections, the attention output projection, the MLP,
    and both layer norms — 34 pairs, emitted in the same order as the
    original hand-unrolled version. Renamed from the mangled
    ``snake_case__`` (whose two parameters also shared one name — a
    SyntaxError) to match the call site in ``convert_cvt_checkpoint``.
    """
    attention_weights = []
    layer = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    block = f"stage{idx}.blocks.{cnt}"
    # Convolutional q/k/v projections.
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_proj = f"{layer}.attention.attention.convolution_projection_{name}.convolution_projection"
        target = f"{block}.attn.conv_proj_{short}"
        attention_weights.append((f"{conv_proj}.convolution.weight", f"{target}.conv.weight"))
        for param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append((f"{conv_proj}.normalization.{param}", f"{target}.bn.{param}"))
    # Linear q/k/v projections.
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{layer}.attention.attention.projection_{name}.{param}",
                    f"{block}.attn.proj_{short}.{param}",
                )
            )
    # Attention output projection, MLP and both layer norms.
    for hf_suffix, orig_suffix in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{layer}.{hf_suffix}.{param}", f"{block}.{orig_suffix}.{param}"))
    return attention_weights
def cls_token(idx: int) -> list:
    """Rename map for the class token of CvT stage ``idx``.

    The original-side key is hard-coded to ``stage2`` because only the last
    stage carries a cls token in the released CvT checkpoints. Renamed from
    the mangled ``snake_case__`` to match the call site in
    ``convert_cvt_checkpoint``.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final() -> list:
    """Rename map for the final layer norm and classification head.

    Renamed from the mangled ``snake_case__`` to match the call site in
    ``convert_cvt_checkpoint``.
    """
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint to a HF ``CvtForImageClassification``.

    Renamed from the mangled ``snake_case__`` (whose four parameters all
    shared the name ``a`` — a SyntaxError) to match the ``__main__`` call.

    Args:
        cvt_model: model name such as ``cvt-w24``; characters 4:6 of the last
            path component select the depth configuration.
        image_size: input image size recorded on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the model + processor.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    # BUG FIX: the keys were cast with ``int(a)`` (the function argument)
    # instead of ``int(k)``.
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the mangled version dropped this assignment's target;
    # restored as the processor's resize size — confirm attribute name.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Map every original tensor to its Hugging Face name and load the result.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # BUG FIX: both the parser and the parsed args were assigned to the
    # mangled name ``a__`` while being used as ``parser``/``args`` below,
    # and the final line carried stray table-junk text (a syntax error).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--cvt_model''',
        default='''cvt-w24''',
        type=str,
        help='''Name of the cvt model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=384,
        type=int,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--cvt_file_name''',
        default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __magic_name__( unittest.TestCase ):
    """Slow integration test: DiT fine-tuned on RVL-CDIP document classification.

    The method is restored to a ``test_``-prefixed name so unittest discovers
    it, locals are given meaningful names, and the stray table-junk text that
    trailed the final assertion (a syntax error) is removed.
    """

    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model.to(torch_device)
        from datasets import load_dataset

        dataset = load_dataset("""nielsr/rvlcdip-demo""")
        image = dataset["""train"""][0]["""image"""].convert("""RGB""")
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # RVL-CDIP has 16 document classes.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """Config tester: MobileViTConfig must expose its key size attributes.

    Renamed from the mangled ``lowercase_`` so the ``MobileViTConfigTester``
    reference in the model test class's setUp resolves; the base class is the
    ``ConfigTester`` imported at the top of this file (it was mangled to the
    undefined ``UpperCamelCase_``), and the method name is the hook
    ``ConfigTester.run_common_tests`` invokes.
    """

    def create_and_test_config_common_properties(self):
        # BUG FIX: the attribute checks referenced the undefined name
        # ``__SCREAMING_SNAKE_CASE`` instead of the config just built.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''hidden_sizes'''))
        self.parent.assertTrue(hasattr(config, '''neck_hidden_sizes'''))
        self.parent.assertTrue(hasattr(config, '''num_attention_heads'''))
class MobileViTModelTester:
    """Builds a tiny MobileViT config plus random inputs, and provides the
    create-and-check helpers used by the model test class below.

    Restored from a mangled version in which every method was named
    ``SCREAMING_SNAKE_CASE_`` (later definitions shadowed earlier ones) and
    every assignment target was lost (so ``__init__`` stored nothing on
    ``self``). Names match the call sites in this file
    (``self.get_config()``, ``prepare_config_and_inputs_for_common``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when ``use_labels``) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Forward without labels: logits only.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        # Forward with labels: same logits shape, loss computed internally.
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model / pipeline test suite for MobileViT.

    Restored from a mangled version in which every class attribute was
    assigned to ``UpperCAmelCase_`` and every method was named
    ``SCREAMING_SNAKE_CASE_`` (later definitions shadowed earlier ones and
    unittest discovered none of them). The mixin base classes come from the
    test_modeling_common / test_pipeline_mixin imports at the top of this
    file; attribute names follow the usages in the bodies themselves
    (``self.all_model_classes``, ``self.model_tester``, ...).
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTModel,
            """image-classification""": MobileViTForImageClassification,
            """image-segmentation""": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        # BUG FIX: both keyword arguments were passed the undefined name
        # ``__SCREAMING_SNAKE_CASE``.
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''MobileViT does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='''MobileViT does not output attentions''')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    # NOTE(review): the original name of this skipped override is not
    # recoverable from this file; restore it from upstream when known.
    def SCREAMING_SNAKE_CASE_(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests below.

    Renamed from the mangled ``SCREAMING_SNAKE_CASE_`` so the
    ``prepare_img()`` call sites resolve; the bogus ``-> List[str]`` return
    annotation (the function returns a PIL image, and ``List`` is not
    imported here) is dropped.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
    """Slow integration tests for released MobileViT checkpoints.

    NOTE(review): the methods here were all mangled to the same name
    ``SCREAMING_SNAKE_CASE_`` (later definitions shadow earlier ones and
    unittest discovers none). In particular the ``@cached_property`` below
    is referenced later as ``self.default_image_processor`` — that is its
    original name.
    """
    # Image processor for the classification checkpoint, or None without vision deps.
    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
    # Classification head: pins the logits shape (1, 1000) and a 3-value slice.
    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.default_image_processor
        lowerCAmelCase = prepare_img()
        lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
        # verify the logits
        lowerCAmelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(__SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
    # Semantic segmentation: pins the logits shape (1, 21, 32, 32) and a 3x3x3 slice.
    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
        lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
        lowerCAmelCase = prepare_img()
        lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = outputs.logits
        # verify the logits
        lowerCAmelCase = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.tensor(
            [
                [[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
                [[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
                [[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
            ] , device=__SCREAMING_SNAKE_CASE , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.logits.detach().cpu()
lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(50, 60)] )
lowerCAmelCase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
# Package version string; exposed under the conventional dunder name so that
# `accelerate.__version__` works for downstream version checks.
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 312 | 1 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two n-dimensional points.

    Raises:
        ValueError: if the points live in spaces of different dimension.
        TypeError/ValueError: propagated from point validation.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if point:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
for item in point:
if not isinstance(__UpperCamelCase , (int, float) ):
SCREAMING_SNAKE_CASE__ = (
"""Expected a list of numbers as input, found """
f"""{type(__UpperCamelCase ).__name__}"""
)
raise TypeError(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ = f"""Expected a list of numbers as input, found {type(__UpperCamelCase ).__name__}"""
raise TypeError(__UpperCamelCase )
else:
raise ValueError("""Missing an input""" )
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Alternative one-expression implementation of the Manhattan distance.

    Behaves identically to ``manhattan_distance``; kept as a didactic variant.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 701 | import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __snake_case ( unittest.TestCase ):
    def __a ( self : Any ):
        """Ensure an optimizer wrapped by Accelerator.prepare survives a pickle round-trip."""
        # A trivial model/optimizer pair is enough to exercise the wrapper.
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(f"""Accelerated optimizer pickling failed with {e}""" )
        # Reset the global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
| 379 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): both constants are bound to the SAME name, so the second
# assignment (32) shadows the first (16). These look like two distinct
# batch-size constants (e.g. train vs eval) — confirm against the original
# script and give them separate names.
_snake_case = 1_6
_snake_case = 3_2
def bamb(x):
    """Convert a byte count to whole mebibytes (floor of ``x / 2**20``).

    Named ``bamb`` ("bytes to MB") to match its call sites in the memory
    tracker and the training loop below.
    """
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that measures CUDA memory used inside the ``with`` block.

    After exit, exposes (all in MiB via ``bamb``):
      - ``begin``:  bytes allocated on entry
      - ``used``:   delta allocated between exit and entry
      - ``peaked``: peak-over-entry delta during the block
    Name matches the ``with TorchTracemalloc() as tracemalloc:`` call sites.
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    """Build train/eval DataLoaders for GLUE-MRPC sliced to *n_train*/*n_val* rows.

    Name matches the ``get_dataloaders(...)`` call in ``training_function``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train for ``config['num_epochs']`` epochs while tracking peak GPU memory.

    Writes per-epoch peak memory (MiB) to ``peak_memory_utilization.json`` in
    ``args.output_dir`` and optionally asserts it stays below
    ``args.peak_memory_upper_bound``. Name matches the call in ``main``.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: use the DeepSpeed dummy when the plugin supplies one.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    """Parse CLI arguments and launch the peak-memory training benchmark."""
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--peak_memory_upper_bound', type=float, default=None, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.', )
    parser.add_argument(
        '--n_train', type=int, default=320, help='Number of training examples to use.', )
    parser.add_argument(
        '--n_val', type=int, default=160, help='Number of validation examples to use.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 580 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

# Module-level tokenizer metadata; names match the references in the
# HerbertTokenizerFast class body below.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) tokenizer for HerBERT.

    Wraps single sequences as ``<s> A </s>`` and pairs as ``<s> A </s> B </s>``.
    Base class restored to the imported PreTrainedTokenizerFast.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding <s>/<\u200bs>-style special tokens around one or two sequences."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type ids: 0s for the first sequence (plus specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the underlying tokenizer model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 580 | 1 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """Constant histogram stretch of a grayscale image.

    Class/method names restored to match the script tail
    (``ConstantStretch()``, ``stretch``, ``plot_histogram``, ``show_image``);
    ``__init__`` assigns instance attributes (the methods read ``self.img`` etc.).
    """

    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []          # per-intensity remapping table
        self.rem = 0
        self.L = 256                 # number of gray levels
        self.sk = 0                  # cumulative probability
        self.k = 0                   # total pixel count (histogram sum)
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Compute the stretched intensity map and rewrite the image in place."""
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the histogram of the (stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display output and input images for 5 seconds."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
if __name__ == "__main__":
    # Distinct names restored (`file_path`, `stretcher`); guarded so importing
    # the module does not run the demo.
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: the dict referenced by _LazyModule at the bottom must
# be named `_import_structure`, and the torch-only entries extend it rather
# than rebinding the whole name.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 92 | 1 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    """Compare JukeboxTokenizer ids against reference tensors.

    Attribute names restored (`tokenizer_class`, `metas`) so `self.metas`
    resolves, and the two test methods given distinct names so both run.
    """

    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": """I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n """,
    }

    @require_torch
    def test_1b_lyrics_tokenizer(self):
        """Tokenize the reference metas with the 1b-lyrics checkpoint."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]]),
            torch.tensor([[0, 0, 0, 1_069, 11]]),
            torch.tensor([[0, 0, 0, 1_069, 11]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        """Tokenize the reference metas with the 5b-lyrics checkpoint."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]]),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 65 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph (directed by default) built from a Python dict.

    ``__init__`` binds instance attributes (`adj_list`, `directed`) that the
    methods read; the edge-insertion method is named ``add_edge`` and returns
    ``self`` so calls can be chained.
    """

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect source_vertex to destination_vertex, creating vertices as needed."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create new vertices for both with each other as the first adjacent
            # vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
| 349 | 0 |
import os
import numpy
import onnx
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase ):
_UpperCamelCase = a.name
_UpperCamelCase = b.name
_UpperCamelCase = ''''''
_UpperCamelCase = ''''''
_UpperCamelCase = a == b
_UpperCamelCase = name_a
_UpperCamelCase = name_b
return res
def SCREAMING_SNAKE_CASE ( node_proto , name , new_name ):
    """Replace every input of *node_proto* named *name* with *new_name*.

    Subgraphs of ``If`` (then/else branches) and ``Loop`` (body) nodes are
    rewritten recursively so nested references are updated too.

    Args:
        node_proto: an ONNX NodeProto (anything exposing ``input``,
            ``op_type`` and ``attribute``).
        name: input name to replace.
        new_name: replacement input name.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Repeated protobuf fields do not support item assignment, so
            # emulate ``input[i] = new_name`` with insert followed by pop.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        # "If" nodes carry their two branch subgraphs as attributes 0 and 1.
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        # "Loop" nodes carry their body subgraph as the first attribute.
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


# Alias under the name the other helpers in this file use to call it.
_node_replace_input_with = SCREAMING_SNAKE_CASE
def SCREAMING_SNAKE_CASE ( graph_proto , name , new_name ):
    """Replace input *name* with *new_name* on every node of *graph_proto*.

    Delegates to ``_node_replace_input_with`` per node, which also recurses
    into ``If``/``Loop`` subgraphs.
    """
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


# Alias under the name the other helpers in this file use to call it.
_graph_replace_input_with = SCREAMING_SNAKE_CASE
def SCREAMING_SNAKE_CASE ( model , model_without_ext , ind_to_replace ):
    """Drop duplicated initializers from *model_without_ext* and rewire uses.

    Args:
        model: reference model whose initializers still carry their data
            (used only for the sanity checks below).
        model_without_ext: model modified in place; each duplicated
            initializer is removed and every node input that referenced it
            is pointed at the kept copy.
        ind_to_replace: list of ``(i, ref_i)`` pairs where initializer ``i``
            duplicates initializer ``ref_i`` (with ``i > ref_i``).
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        # Both models must list initializers in the same order, and the
        # duplicate must come after the tensor it duplicates.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # Point every consumer of the removed tensor at the kept duplicate.
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


# Alias under the name `remove_dup_initializers` uses to call it.
_remove_dup_initializers_from_model = SCREAMING_SNAKE_CASE
def SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    """Deduplicate identical initializer tensors in an ONNX model file.

    Scans all initializers, finds pairwise duplicates (names ignored),
    removes them, rewires consumers to the kept copy, and saves the result
    next to the input file under an ``optimized_`` prefix.

    Args:
        lowerCAmelCase: path to the ``.onnx`` model file.

    Returns:
        str: path of the optimized model that was written.
    """
    model_file_folder = os.path.dirname(lowerCAmelCase)
    model_file_name = os.path.basename(lowerCAmelCase)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                # Estimate the memory saved by dropping the duplicate.
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                # Replace occurrences of j (the duplicate) with i (kept).
                ind_to_replace.append((j, i) )
    print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model_file_name = '''optimized_''' + model_file_name
    new_model_path = os.path.join(model_file_folder , new_model_file_name )
    onnx.save(model , new_model_path )
    return new_model_path


# Descriptive alias for external callers.
remove_dup_initializers = SCREAMING_SNAKE_CASE
| 105 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __A( unittest.TestCase ):
    """Loading-path tests for tokenizers: cached-files fallback when the Hub
    is unreachable, and legacy single-file / URL loading."""

    def _UpperCamelCase ( self ):
        """A cached slow tokenizer is served when the Hub returns HTTP 500."""
        # Fake "server down" response returned for any HTTP request.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def _UpperCamelCase ( self ):
        """Same as above, for a fast tokenizer."""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained('gpt2' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock ) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained('gpt2' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def _UpperCamelCase ( self ):
        """Legacy loading from a bare sentencepiece file, and remote priority
        over a same-named local ``tokenizer.json``."""
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, 'wb' ) as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model', f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json' ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json', 'wb' ) as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json', f )
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json' )

    def _UpperCamelCase ( self ):
        """A tokenizer vocabulary file can be loaded directly from a URL."""
        _ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class __A( unittest.TestCase ):
    """Staging-Hub tests: pushing slow/fast and custom (dynamic) tokenizers."""

    # Minimal vocabulary used to build throwaway BERT-style tokenizers.
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def _UpperCamelCase ( cls ):
        """Authenticate against the staging endpoint."""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def _UpperCamelCase ( cls ):
        """Best-effort removal of repos created by the tests."""
        try:
            delete_repo(token=cls._token, repo_id='test-tokenizer' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-tokenizer-org' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-tokenizer' )
        except HTTPError:
            pass

    def _UpperCamelCase ( self ):
        """Push a tokenizer to a user repo via push_to_hub and save_pretrained."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt' )
            with open(vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub('test-tokenizer', use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token, repo_id='test-tokenizer' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='test-tokenizer', push_to_hub=True, use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )

    def _UpperCamelCase ( self ):
        """Same as above, pushing to an organization repo."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt' )
            with open(vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub('valid_org/test-tokenizer-org', use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-tokenizer-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='valid_org/test-tokenizer-org', push_to_hub=True, use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )

    @require_tokenizers
    def _UpperCamelCase ( self ):
        """Push custom (trust_remote_code) slow and fast tokenizers."""
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt' )
            with open(vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer' )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt' )
            with open(vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast' )
        tokenizer = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer', use_fast=False, trust_remote_code=True )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer' )
class __A( unittest.TestCase ):
    """Unit tests for the tokenizer ``Trie`` used to split on added tokens."""

    def _UpperCamelCase ( self ):
        """Adding tokens builds the expected nested-dict trie."""
        trie = Trie()
        trie.add('Hello 友達' )
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
        trie.add('Hello' )
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )

    def _UpperCamelCase ( self ):
        """Splitting with no registered tokens returns the text unchanged;
        registered tokens are isolated (longest match)."""
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100' ), ['[CLS] This is a extra_id_100'] )
        trie.add('[CLS]' )
        trie.add('extra_id_1' )
        trie.add('extra_id_100' )
        self.assertEqual(trie.split('[CLS] This is a extra_id_100' ), ['[CLS]', ' This is a ', 'extra_id_100'] )

    def _UpperCamelCase ( self ):
        """Single-character tokens split at the start and end of the text."""
        trie = Trie()
        trie.add('A' )
        self.assertEqual(trie.split('ABC' ), ['A', 'BC'] )
        self.assertEqual(trie.split('BCA' ), ['BC', 'A'] )

    def _UpperCamelCase ( self ):
        """A token sharing a suffix does not break the longer token's match."""
        trie = Trie()
        trie.add('TOKEN]' )
        trie.add('[SPECIAL_TOKEN]' )
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ), ['This is something ', '[SPECIAL_TOKEN]'] )

    def _UpperCamelCase ( self ):
        """Single-char tokens inside a longer token do not shadow it."""
        trie = Trie()
        trie.add('A' )
        trie.add('P' )
        trie.add('[SPECIAL_TOKEN]' )
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ), ['This is something ', '[SPECIAL_TOKEN]'] )

    def _UpperCamelCase ( self ):
        """The earliest-starting match wins over overlapping tokens."""
        trie = Trie()
        trie.add('AB' )
        trie.add('B' )
        trie.add('C' )
        self.assertEqual(trie.split('ABC' ), ['AB', 'C'] )

    def _UpperCamelCase ( self ):
        """A longer earlier match beats shorter overlapping ones."""
        trie = Trie()
        trie.add('ABC' )
        trie.add('B' )
        trie.add('CD' )
        self.assertEqual(trie.split('ABCD' ), ['ABC', 'D'] )

    def _UpperCamelCase ( self ):
        """cut_text tolerates (invalid) duplicated offsets."""
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts, ['AB', 'C'] )
| 105 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: submodule name -> public names defined there.
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: submodule name -> public names defined there.
_import_structure = {
    'configuration_bridgetower': [
        'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BridgeTowerConfig',
        'BridgeTowerTextConfig',
        'BridgeTowerVisionConfig',
    ],
    'processing_bridgetower': ['BridgeTowerProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processing requires the vision extras.
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure['modeling_bridgetower'] = [
        'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BridgeTowerForContrastiveLearning',
        'BridgeTowerForImageAndTextRetrieval',
        'BridgeTowerForMaskedLM',
        'BridgeTowerModel',
        'BridgeTowerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 635 | 1 |
from __future__ import annotations
def _lowerCAmelCase ( nums , target ) -> list[int]:
    """Two-pointer two-sum on a sorted list.

    Args:
        nums: list of integers sorted in non-decreasing order.
        target: desired pair sum.

    Returns:
        ``[i, j]`` with ``nums[i] + nums[j] == target`` and ``i < j``,
        or ``[]`` when no such pair exists.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        elif pair_sum < target:
            # Sum too small: advance the left pointer to a larger value.
            i = i + 1
        else:
            # Sum too large: retreat the right pointer to a smaller value.
            j = j - 1
    return []


# Public alias; the __main__ demo below refers to the function by this name.
two_pointer = _lowerCAmelCase
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module.
    doctest.testmod()
    # Demo: expected output is two_pointer([2, 7, 11, 15], 9) = [0, 1].
    print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 473 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
A : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase( _a ):
    """CLIP-based image encoder for the Paint-by-Example pipeline.

    Pools a CLIP vision embedding, refines it with a small transformer
    mapper, and projects it to ``proj_size`` conditioning features. Also
    owns a learned unconditional embedding for classifier-free guidance.
    """

    def __init__( self , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def SCREAMING_SNAKE_CASE_ ( self , pixel_values , return_uncond_vector=False ):
        """Encode ``pixel_values`` into conditioning embeddings.

        When ``return_uncond_vector`` is True, also returns the learned
        unconditional embedding alongside the encoded states.
        """
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        # The mapper expects a sequence dimension.
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states

    # nn.Module dispatches __call__ to ``forward``; expose the encode
    # method under that name as well (transformed name kept for compat).
    forward = SCREAMING_SNAKE_CASE_
class UpperCamelCase( nn.Module ):
    """Small transformer stack that maps the pooled CLIP embedding into the
    conditioning sequence used by the Paint-by-Example U-Net."""

    def __init__( self , config ):
        super().__init__()
        # One block per five encoder layers (rounded up), single-head
        # attention, as in the reference Paint-by-Example implementation.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn="gelu" , attention_bias=True )
                for _ in range(num_layers )
            ] )

    def SCREAMING_SNAKE_CASE_ ( self , hidden_states ):
        """Run the hidden states through each transformer block in order."""
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states

    # nn.Module dispatches __call__ to ``forward``.
    forward = SCREAMING_SNAKE_CASE_


# Descriptive alias used by the image encoder above.
PaintByExampleMapper = UpperCamelCase
| 473 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401


# Emit a single deprecation warning at import time pointing users at the
# new `diffusers.pipelines.controlnet` location of these classes.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: submodule name -> public names defined there.
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure['modeling_van'] = [
        'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VanForImageClassification',
        'VanModel',
        'VanPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 312 | 1 |
def UpperCAmelCase ( base , exponent , modulo_value ):
    """Compute ``base ** exponent % modulo_value`` by recursive squaring.

    Args:
        base: base of the exponentiation.
        exponent: integer exponent, must be >= 1.
        modulo_value: the modulus.

    Returns:
        ``(base ** exponent) % modulo_value``; note the ``exponent == 1``
        base case returns ``base`` unreduced, matching the original.
    """
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        # Even exponent: square the half-power.
        x = UpperCAmelCase(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        # Odd exponent: peel off one factor of the base.
        return (base * UpperCAmelCase(base , exponent - 1 , modulo_value )) % modulo_value


# Name used by the solution function below.
_modexpt = UpperCAmelCase
def UpperCAmelCase ( base = 1777 , height = 1855 , digits = 8 ):
    """Project Euler 188: last *digits* digits of the tetration base↑↑height.

    Folds ``_modexpt`` iteratively so only the trailing digits are carried.

    Args:
        base: tetration base (default 1777).
        height: tetration height (default 1855).
        digits: number of trailing decimal digits to keep (default 8).

    Returns:
        The last *digits* digits of ``base ↑↑ height``.
    """
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result


# Alias used by the __main__ demo below.
solution = UpperCAmelCase
if __name__ == "__main__":
    # Prints the trailing digits of 1777↑↑1855 (Project Euler 188 demo).
    print(f"""{solution() = }""")
| 712 |
snake_case_ : str =[0, 2, 4, 6, 8]
snake_case_ : List[str] =[1, 3, 5, 7, 9]
def UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__A = 0
for digit in range(10 ):
__A = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , lowerCAmelCase__ , lowerCAmelCase__ )
return result
__A = 0
for digita in range(10 ):
__A = digita
if (remainder + digita) % 2 == 0:
__A = ODD_DIGITS
else:
__A = EVEN_DIGITS
for digita in other_parity_digits:
__A = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , lowerCAmelCase__ , lowerCAmelCase__ , )
return result
def UpperCAmelCase ( lowerCAmelCase__ = 9 ):
'''simple docstring'''
__A = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(lowerCAmelCase__ , 0 , [0] * length , lowerCAmelCase__ )
return result
if __name__ == "__main__":
    # Prints the count of reversible numbers for the default 9-digit limit.
    print(f"""{solution() = }""")
| 205 | 0 |
import operator as op


# NOTE(review): every constant in this module is bound to the same name
# ``snake_case``; each assignment overwrites the previous one, so only the
# final list survives at runtime. Upstream these are distinct constants
# (checkpoint file names, SageMaker versions, FSDP/DeepSpeed option lists);
# confirm the intended names before relying on any of them.

# Checkpoint/file names used when saving and loading training state.
snake_case : Optional[int] = '''scaler.pt'''
snake_case : Dict = '''pytorch_model'''
snake_case : Any = '''random_states'''
snake_case : Optional[int] = '''optimizer'''
snake_case : List[str] = '''scheduler'''
snake_case : str = '''pytorch_model.bin'''
snake_case : Tuple = '''pytorch_model.bin.index.json'''
snake_case : Union[str, Any] = '''model.safetensors'''
snake_case : Optional[int] = '''model.safetensors.index.json'''
# SageMaker environment versions and supported instance types.
snake_case : Tuple = '''1.10.2'''
snake_case : Optional[Any] = '''py38'''
snake_case : Tuple = '''4.17.0'''
snake_case : Union[str, Any] = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
# FSDP configuration option lists.
snake_case : Union[str, Any] = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
snake_case : str = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
snake_case : Optional[int] = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
snake_case : Optional[Any] = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
snake_case : List[str] = '''2.0.1'''
# DeepSpeed multinode launchers and torch.compile modes.
snake_case : Any = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
snake_case : Any = ['''default''', '''reduce-overhead''', '''max-autotune''']
# Mapping of comparison-operator strings to their functions.
snake_case : Optional[int] = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
snake_case : Any = [
    '''nnodes''',
    '''nproc_per_node''',
    '''rdzv_backend''',
    '''rdzv_endpoint''',
    '''rdzv_id''',
    '''rdzv_conf''',
    '''standalone''',
    '''max_restarts''',
    '''monitor_interval''',
    '''start_method''',
    '''role''',
    '''module''',
    '''m''',
    '''no_python''',
    '''run_path''',
    '''log_dir''',
    '''r''',
    '''redirects''',
    '''t''',
    '''tee''',
    '''node_rank''',
    '''master_addr''',
    '''master_port''',
]
# Distributed-type groups (CUDA-based vs XPU-based).
snake_case : Tuple = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
snake_case : Optional[int] = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 335 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
snake_case : Union[str, Any] = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
    """Deprecated alias of the Perceiver image processor.

    Kept for backward compatibility: instantiation emits a
    ``FutureWarning`` and otherwise behaves exactly like the parent
    image-processor class.
    """

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 335 | 1 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
# Lazily-populated list of image formats PIL can both read and write
# (filled in by list_image_compression_formats()).
_IMAGE_COMPRESSION_FORMATS = None
# "<" on little-endian machines, ">" on big-endian ones.
_NATIVE_BYTEORDER = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype('''|b1'''),
    np.dtype('''|u1'''),
    np.dtype('''<u2'''),
    np.dtype('''>u2'''),
    np.dtype('''<i2'''),
    np.dtype('''>i2'''),
    np.dtype('''<u4'''),
    np.dtype('''>u4'''),
    np.dtype('''<i4'''),
    np.dtype('''>i4'''),
    np.dtype('''<f4'''),
    np.dtype('''>f4'''),
    np.dtype('''<f8'''),
    np.dtype('''>f8'''),
]
# Keep the transformed file's last binding of ``__A`` referencable.
__A = _VALID_IMAGE_ARRAY_DTPYES
@dataclass
class _UpperCAmelCase :
    """Arrow/Datasets feature type for images.

    Stores images as ``{"bytes": binary, "path": string}`` structs and
    decodes them back to ``PIL.Image.Image`` objects on access.
    """

    # Whether accessing an example decodes it to a PIL image.
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type: str = field(default="Image" , init=False , repr=False )

    def __call__( self ):
        return self.pa_type

    def encode_example( self , value ):
        """Encode *value* (path, bytes, numpy array, PIL image, or dict)
        into the Arrow-storable ``{"bytes", "path"}`` form."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.' )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path' )}
        elif value.get('bytes' ) is not None or value.get('path' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes' ), "path": value.get('path' )}
        else:
            raise ValueError(
                f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )

    def decode_example( self , value , token_per_repo_id=None ):
        """Decode a stored ``{"bytes", "path"}`` struct back to a PIL image.

        ``token_per_repo_id`` maps Hub repo ids to auth tokens for reading
        remote image files.
        """
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.' )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split('::' )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , 'rb' , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load() # to avoid "Too many open files" errors
        return image

    def flatten( self ):
        """Return self while decodable, otherwise the flattened dict form."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('binary' ),
                "path": Value('string' ),
            }
        )

    def cast_storage( self , storage ):
        """Cast string / binary / struct / list Arrow storage to the
        canonical ``{"bytes", "path"}`` struct layout."""
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('bytes' ) >= 0:
                bytes_array = storage.field('bytes' )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index('path' ) >= 0:
                path_array = storage.field('path' )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            # Lists are treated as raw image arrays and re-encoded to bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )

    def embed_storage( self , storage ):
        """Embed referenced image files into the storage as bytes, keeping
        only the basename of each path."""
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , 'rb' ) as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def lowercase ( ):
    """Return image formats PIL can both open and save (computed once)."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    global _IMAGE_COMPRESSION_FORMATS
    # globals().get keeps the first call safe even when the module-level
    # sentinel was never defined.
    if globals().get('_IMAGE_COMPRESSION_FORMATS' ) is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS


# Descriptive alias used by the other helpers in this file.
list_image_compression_formats = lowercase
def lowercase ( image ):
    """Serialize a PIL image to bytes, keeping its original format when PIL
    can re-encode it and falling back to PNG/TIFF otherwise."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format_ = image.format
    else:
        # PNG for standard modes, TIFF for exotic ones (e.g. high bit depth).
        format_ = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer , format=format_ )
    return buffer.getvalue()


# Descriptive alias used by the other helpers in this file.
image_to_bytes = lowercase
def lowercase ( image ):
    """Encode a PIL image as ``{"path", "bytes"}``, preferring the original
    file path when the image still carries one."""
    if hasattr(image , 'filename' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}


# Descriptive alias used by the Image feature class above.
encode_pil_image = lowercase
def lowercase ( array ):
    """Encode a numpy array as image bytes, downcasting its dtype when
    needed so PIL can handle it."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
        if dtype is not dest_dtype:
            warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}


# Descriptive alias used by the Image feature class above.
encode_np_array = lowercase
def lowercase(objs):
    """Encode a list of image-like objects (paths, numpy arrays or PIL images)
    into a list of Image-feature dicts, preserving ``None`` entries.

    NOTE(review): ``encode_np_array`` / ``encode_pil_image`` are the encoders
    defined above (their names are mangled to ``lowercase`` in this file);
    ``first_non_null_value`` comes from this package's utilities.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    if objs:
        # Dispatch on the type of the first non-null value.
        _, first_value = first_non_null_value(objs)
        if isinstance(first_value, str):
            # Plain paths: defer reading the bytes.
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(first_value, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(first_value, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 716 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
# Configure a timestamped log format for the whole script.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Count token occurrences over a binarized MLM dataset and dump a
    # per-vocab-id frequency table (used to smooth masking probabilities).
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Fix: write each token's frequency into its vocab slot (the mangled
    # original discarded the values into a throwaway name).
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 141 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCAmelCase_ : Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    """Builds Pix2Struct image-processor configs and dummy inputs for the tests below.

    NOTE(review): restored from mangled placeholders — the original
    ``__init__`` had duplicate parameter names (a SyntaxError) and never
    stored anything on ``self``. The call sites in this file reference
    ``PixaStructImageProcessingTester``, ``prepare_image_processor_dict``
    and ``prepare_dummy_image``, which fixes the naming here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a Pix2Struct image processor."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Download a small reference image and return it as an RGB PIL image."""
        img_url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
    # Test suite for Pix2StructImageProcessor with default (3-channel) inputs.
    # NOTE(review): identifiers in this file were mechanically mangled. All
    # methods below share one name (only the last binding survives on the
    # class), and ``__snake_case`` / ``__SCREAMING_SNAKE_CASE`` appear both
    # as throwaway assignment targets and as unresolved argument
    # placeholders (originally e.g. ``self.image_processor_tester = ...``,
    # ``equal_resolution=False``, ``max_patches=max_patch``). Code is kept
    # byte-identical; only comments/docstrings were changed.

    __lowercase : str = PixaStructImageProcessor if is_vision_available() else None

    def lowerCAmelCase ( self ) -> Union[str, Any]:
        '''setUp: build the shared image-processing tester helper.'''
        __snake_case = PixaStructImageProcessingTester(self )

    @property
    def lowerCAmelCase ( self ) -> Dict:
        '''Constructor kwargs for the image processor, taken from the tester.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase ( self ) -> List[Any]:
        '''The processor must expose its configuration attributes.'''
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_convert_rgb''' ) )

    def lowerCAmelCase ( self ) -> Optional[Any]:
        '''Smoke test on a real downloaded image: check the patch statistics.'''
        __snake_case = self.image_processor_tester.prepare_dummy_image()
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        __snake_case = 2048
        __snake_case = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1E-3 , rtol=1E-3 ) )

    def lowerCAmelCase ( self ) -> int:
        '''PIL inputs: flattened patches have the expected shape, batched and not.'''
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )

        # Test not batched input
        __snake_case = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            __snake_case = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            __snake_case = image_processor(
                __SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def lowerCAmelCase ( self ) -> Optional[int]:
        '''VQA mode: header text is required when enabled; shapes stay as expected.'''
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )

        # Test not batched input
        __snake_case = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        __snake_case = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(__SCREAMING_SNAKE_CASE ):
                __snake_case = image_processor(
                    image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches

            __snake_case = '''Hello'''
            __snake_case = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            __snake_case = image_processor(
                __SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def lowerCAmelCase ( self ) -> int:
        '''Numpy-array inputs: flattened patches have the expected shape.'''
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )

        __snake_case = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            __snake_case = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            __snake_case = image_processor(
                __SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def lowerCAmelCase ( self ) -> Any:
        '''Torch-tensor inputs: flattened patches have the expected shape.'''
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )

        # Test not batched input
        __snake_case = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            __snake_case = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            __snake_case = image_processor(
                __SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
    # Variant of the suite above for 4-channel inputs (e.g. RGBA), which the
    # processor converts to RGB — hence ``num_channels - 1`` in the expected
    # hidden dimension.
    # NOTE(review): same mangling caveats as the class above — duplicated
    # method names and unresolved ``__snake_case`` / ``__SCREAMING_SNAKE_CASE``
    # placeholders. Code kept byte-identical; only comments/docstrings changed.

    __lowercase : int = PixaStructImageProcessor if is_vision_available() else None

    def lowerCAmelCase ( self ) -> Optional[Any]:
        '''setUp: build a 4-channel tester (expected output uses 3 channels).'''
        __snake_case = PixaStructImageProcessingTester(self , num_channels=4 )
        __snake_case = 3

    @property
    def lowerCAmelCase ( self ) -> Tuple:
        '''Constructor kwargs for the image processor, taken from the tester.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase ( self ) -> Union[str, Any]:
        '''The processor must expose its configuration attributes.'''
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
        self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_convert_rgb''' ) )

    def lowerCAmelCase ( self ) -> Optional[int]:
        '''4-channel PIL inputs: shapes use ``num_channels - 1`` after RGB conversion.'''
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )

        # Test not batched input
        __snake_case = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            __snake_case = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            __snake_case = image_processor(
                __SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below ``n``.

    Project Euler problem 1. Starting at 3 is safe because 0, 1 and 2
    contribute nothing to the sum.
    """
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        # (A dead ``elif a % 15 == 0: result -= a`` branch was removed: any
        # multiple of 15 already satisfies ``a % 3 == 0`` above, so it could
        # never execute.)
        a += 1
    return result


if __name__ == "__main__":
    print(f'{__a() = }')
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
    '''Tokenization tests for DeBERTa (slow and fast tokenizers).

    NOTE(review): identifiers were mechanically mangled — the three class
    attributes all share the name ``A`` (originally ``tokenizer_class``,
    ``test_rust_tokenizer`` and ``rust_tokenizer_class``), every test method
    is named ``snake_case__`` (so only the last binding survives on the
    class), and several locals are bound to ``__snake_case`` but read back
    under their original names (``tokens``, ``tokd``, ``encoding`` …). Code
    is kept byte-identical; only comments/docstrings were changed.
    '''

    A : Optional[int] = DebertaTokenizer
    A : List[Any] = True
    A : Tuple = DebertaTokenizerFast

    def snake_case__ ( self : str ):
        '''setUp: write a toy BPE vocab/merges pair to a temporary directory.'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __snake_case : List[str] = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """[UNK]""",
        ]
        __snake_case : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
        __snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __snake_case : List[str] = {"""unk_token""": """[UNK]"""}

        __snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(_lowerCAmelCase ) )

    def snake_case__ ( self : List[Any] , **_lowerCAmelCase : Tuple ):
        '''Build a tokenizer from the temp dir, merging in the special-token map.'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )

    def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] ):
        '''Return an (input, expected output) text pair for round-trip tests.'''
        __snake_case : Tuple = """lower newer"""
        __snake_case : Tuple = """lower newer"""
        return input_text, output_text

    def snake_case__ ( self : Optional[Any] ):
        '''Full tokenizer: tokens and ids for a simple phrase.'''
        __snake_case : Tuple = self.get_tokenizer()
        __snake_case : Dict = """lower newer"""
        __snake_case : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        __snake_case : Optional[Any] = tokenizer.tokenize(_lowerCAmelCase )
        self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )

        __snake_case : Dict = tokens + [tokenizer.unk_token]
        __snake_case : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )

    def snake_case__ ( self : List[Any] ):
        '''token_type_ids must mark the two sequences of a pair.'''
        __snake_case : Any = self.get_tokenizer()
        __snake_case : Any = tokenizer("""Hello""" , """World""" )
        __snake_case : Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["""token_type_ids"""] , _lowerCAmelCase )

    @slow
    def snake_case__ ( self : Union[str, Any] ):
        '''Special-token insertion must match manual build_inputs_with_special_tokens.'''
        __snake_case : Dict = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )

        __snake_case : int = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
        __snake_case : Tuple = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )

        __snake_case : List[Any] = tokenizer.encode(
            """sequence builders""" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
        __snake_case : Dict = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )

        __snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
        __snake_case : str = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def snake_case__ ( self : str ):
        '''Integration test: exact ids/masks and decoded strings for three sentences.'''
        __snake_case : str = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )

        for tokenizer_class in tokenizer_classes:
            __snake_case : int = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )

            __snake_case : Any = [
                """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
                """ALBERT incorporates two parameter reduction techniques""",
                """The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
                """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
                """ vocabulary embedding.""",
            ]

            __snake_case : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase )
            __snake_case : Union[str, Any] = [tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) for seq in encoding["""input_ids"""]]

            # fmt: off
            __snake_case : List[str] = {
                """input_ids""": [
                    [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
                ],
                """token_type_ids""": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                """attention_mask""": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            __snake_case : Tuple = [
                """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
                """ALBERT incorporates two parameter reduction techniques""",
                """The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
                """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
                """ vocabulary embedding.""",
            ]

            self.assertDictEqual(encoding.data , _lowerCAmelCase )

            for expected, decoded in zip(_lowerCAmelCase , _lowerCAmelCase ):
                self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
| 390 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (name mangled; upstream this is ``logger``).
lowercase_ = logging.get_logger(__name__)

# Map of pretrained CamemBERT checkpoints to their hosted config files.
# NOTE(review): this dict is bound to the *same* mangled name as the logger
# above and therefore shadows it — upstream these are two distinct names
# (``logger`` and ``CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP``). Kept
# byte-identical here; only comments were added.
lowercase_ = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    """Configuration class for CamemBERT (a RoBERTa-style architecture).

    NOTE(review): the base-class name is mangled in this file; upstream it is
    ``PretrainedConfig``. The original mangled ``__init__`` had every
    parameter named identically (a SyntaxError) and bound every value to one
    throwaway local instead of storing it on ``self``; canonical parameter
    names and instance attributes are restored here.
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special token ids are forwarded to the base config; everything else
        # is stored on the instance.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    """ONNX export configuration for CamemBERT.

    NOTE(review): the base-class name is mangled; upstream it is
    ``OnnxConfig``. This definition also shadows the config class of the
    same mangled name directly above it — upstream the two have distinct
    names (``CamembertConfig`` / ``CamembertOnnxConfig``).
    """

    @property
    def snake_case__ ( self ):
        """Return the dynamic-axis mapping for each ONNX model input."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # Fix: the axis mapping was previously bound to a throwaway local,
        # leaving ``dynamic_axis`` undefined at the return below.
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 390 | 1 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 638 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def _UpperCamelCase(target: int = 200_0000) -> int:
    """Project Euler 85: area of the grid whose rectangle count is closest to ``target``.

    An ``a x b`` grid contains ``T(a) * T(b)`` rectangles, where ``T(k)`` is
    the k-th triangle number. For each candidate width we estimate the
    matching height via the quadratic formula and test the two nearest
    integers.
    """
    # Triangle numbers large enough to cover any factorisation of ~target.
    triangle_numbers: list[int] = [0]
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # product of triangle numbers closest to target so far
    best_product = 0
    # grid area (a * b) achieving best_product
    area = 0

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        # Solve T(b) ~= target / T(a) for b with the quadratic formula.
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{_UpperCamelCase() = }")
| 638 | 1 |
def lowerCAmelCase(n: int = 10) -> str:
    """Project Euler 97: last ``n`` digits of the non-Mersenne prime 28433*2^7830457 + 1.

    Args:
        n: number of trailing digits to return (must be a non-negative int).

    Raises:
        ValueError: if ``n`` is not an int or is negative.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    # Three-argument pow keeps the exponentiation cheap (modular).
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{lowerCAmelCase(10) = }")
| 707 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the GLPN model package. The mangled original
# overwrote a single throwaway name for every section and never installed
# the lazy module, so ``_import_structure`` was undefined at the bottom.
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision-only symbols.
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only symbols.
    _import_structure['modeling_glpn'] = [
        'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GLPNForDepthEstimation',
        'GLPNLayer',
        'GLPNModel',
        'GLPNPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 608 | 0 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# Browser-like headers so Google serves the full results page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}


def a(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to ``max_images`` Google-Images results for ``query``.

    Images are saved under ``query_<query>/original_size_img_<i>.jpg``;
    returns the number of images fetched (0 when the result page yields no
    parsable data).

    NOTE(review): scraping Google like this is brittle and subject to the
    site's terms of service; the page format can change at any time.
    """
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    # Round-trip through JSON to unescape the embedded payload.
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    # Strip the low-resolution thumbnail URLs, keeping only full-size links.
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # The URLs are double-escaped in the page source.
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index


if __name__ == "__main__":
    try:
        image_count = a(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 96 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the NLLB tokenizer. The mangled original bound
# all five values to one name (``A_``), each assignment shadowing the last,
# while the tokenizer class below reads them under their canonical names
# (``VOCAB_FILES_NAMES`` etc.) — those names are restored here.
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class __lowerCAmelCase ( UpperCAmelCase ):
    """SentencePiece tokenizer for NLLB-200 (auto-generated rename of `NllbTokenizer`).

    NOTE(review): this block is machine-mangled and cannot run as written:
    - all six class attributes below share the single name `__lowerCamelCase`,
      so only the last assignment (intended: `suffix_tokens`) survives;
    - every `__init__` parameter shares the name `UpperCamelCase_`, which is a
      SyntaxError (duplicate argument) in Python;
    - most locals/attributes collapse onto one name, so assignments overwrite
      each other.
    The comments describe the *intended* behavior recoverable from the code.
    """

    # Intended attributes (in order): vocab_files_names, max_model_input_sizes,
    # pretrained_vocab_files_map, model_input_names, prefix_tokens, suffix_tokens.
    __lowerCamelCase : Tuple = VOCAB_FILES_NAMES
    __lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
    __lowerCamelCase : List[int] = []
    __lowerCamelCase : List[int] = []

    # Intended parameters: vocab_file, bos/eos/sep/cls/unk/pad/mask tokens,
    # tokenizer_file, src_lang, tgt_lang, sp_model_kwargs,
    # additional_special_tokens, legacy_behaviour.
    def __init__( self: int , UpperCamelCase_: Dict , UpperCamelCase_: Any="<s>" , UpperCamelCase_: Dict="</s>" , UpperCamelCase_: Tuple="</s>" , UpperCamelCase_: int="<s>" , UpperCamelCase_: Union[str, Any]="<unk>" , UpperCamelCase_: Union[str, Any]="<pad>" , UpperCamelCase_: int="<mask>" , UpperCamelCase_: str=None , UpperCamelCase_: Any=None , UpperCamelCase_: int=None , UpperCamelCase_: Optional[Dict[str, Any]] = None , UpperCamelCase_: str=None , UpperCamelCase_: Optional[Any]=False , **UpperCamelCase_: Optional[Any] , ):
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase_ =AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
        UpperCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
        UpperCamelCase_ =legacy_behaviour
        super().__init__(
            bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase_ , **UpperCamelCase_ , )
        # Load the SentencePiece model from the vocab file.
        UpperCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(UpperCamelCase_ ) )
        UpperCamelCase_ =vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7  |  8  |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'   | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 token
        UpperCamelCase_ ={"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        UpperCamelCase_ =1
        UpperCamelCase_ =len(self.sp_model )
        # Append one id per FAIRSEQ_LANGUAGE_CODES entry after the spm vocabulary.
        UpperCamelCase_ ={
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase_ )
        }
        UpperCamelCase_ ={v: k for k, v in self.lang_code_to_id.items()}
        UpperCamelCase_ =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        UpperCamelCase_ ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
        UpperCamelCase_ =list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        # Default source language is English when none is given.
        UpperCamelCase_ =src_lang if src_lang is not None else "eng_Latn"
        UpperCamelCase_ =self.lang_code_to_id[self._src_lang]
        UpperCamelCase_ =tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __getstate__( self: List[Any] ):
        # Pickle support: the SentencePieceProcessor itself is not picklable,
        # so serialize its model proto instead.
        UpperCamelCase_ =self.__dict__.copy()
        UpperCamelCase_ =None
        UpperCamelCase_ =self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self: int , UpperCamelCase_: List[str] ):
        # Rebuild the SentencePieceProcessor from the serialized proto.
        UpperCamelCase_ =d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            UpperCamelCase_ ={}
        UpperCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    @property
    def UpperCamelCase__ ( self: List[Any] ):
        # Intended: vocab_size.
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def UpperCamelCase__ ( self: Any ):
        # Intended: src_lang getter.
        return self._src_lang

    @src_lang.setter
    def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
        # Intended: src_lang setter — also refreshes the special-token prefixes/suffixes.
        # NOTE(review): decorator references `src_lang`, which this mangled class never binds.
        UpperCamelCase_ =new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
        # Intended: get_special_tokens_mask(token_ids_0, token_ids_1, already_has_special_tokens).
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
        UpperCamelCase_ =[1] * len(self.prefix_tokens )
        UpperCamelCase_ =[1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(UpperCamelCase_ )) + suffix_ones
        return prefix_ones + ([0] * len(UpperCamelCase_ )) + ([0] * len(UpperCamelCase_ )) + suffix_ones

    def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
        # Intended: build_inputs_with_special_tokens — wrap ids in language prefix/suffix tokens.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
        # Intended: create_token_type_ids_from_sequences — NLLB uses all-zero segment ids.
        UpperCamelCase_ =[self.sep_token_id]
        UpperCamelCase_ =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Optional[str] , UpperCamelCase_: Optional[str] , **UpperCamelCase_: int ):
        # Intended: _build_translation_inputs — encode raw inputs and attach the
        # target-language id as `forced_bos_token_id`.
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        UpperCamelCase_ =src_lang
        UpperCamelCase_ =self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
        UpperCamelCase_ =self.convert_tokens_to_ids(UpperCamelCase_ )
        UpperCamelCase_ =tgt_lang_id
        return inputs

    def UpperCamelCase__ ( self: Optional[Any] ):
        # Intended: get_vocab — id->token mapping plus added tokens.
        UpperCamelCase_ ={self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def UpperCamelCase__ ( self: Dict , UpperCamelCase_: str ):
        # Intended: _tokenize — delegate to SentencePiece, returning string pieces.
        return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )

    def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: Dict ):
        # Intended: _convert_token_to_id, honoring the fairseq special-token alignment.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        UpperCamelCase_ =self.sp_model.PieceToId(UpperCamelCase_ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def UpperCamelCase__ ( self: List[str] , UpperCamelCase_: str ):
        # Intended: _convert_id_to_token (inverse of the above).
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: str ):
        # Intended: convert_tokens_to_string — join pieces and replace the
        # SentencePiece underline marker with a space.
        UpperCamelCase_ ="".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
        return out_string

    def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
        # Intended: save_vocabulary — copy the .model file into `save_directory`,
        # or dump the serialized proto if the source file is gone.
        if not os.path.isdir(UpperCamelCase_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCamelCase_ =os.path.join(
            UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase_ , "wb" ) as fi:
                UpperCamelCase_ =self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase_ )
        return (out_vocab_file,)

    def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str = "eng_Latn" , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "fra_Latn" , **UpperCamelCase_: Dict , ):
        # Intended: prepare_seq2seq_batch — record src/tgt languages then defer to the base class.
        UpperCamelCase_ =src_lang
        UpperCamelCase_ =tgt_lang
        return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )

    def UpperCamelCase__ ( self: int ):
        # Intended: _switch_to_input_mode.
        return self.set_src_lang_special_tokens(self.src_lang )

    def UpperCamelCase__ ( self: List[Any] ):
        # Intended: _switch_to_target_mode.
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: Dict ):
        # Intended: set_src_lang_special_tokens.
        # Legacy: [ids] + [eos, src_lang_code]; non-legacy: [src_lang_code] + ids + [eos].
        UpperCamelCase_ =self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            UpperCamelCase_ =[]
            UpperCamelCase_ =[self.eos_token_id, self.cur_lang_code]
        else:
            UpperCamelCase_ =[self.cur_lang_code]
            UpperCamelCase_ =[self.eos_token_id]

    def UpperCamelCase__ ( self: Dict , UpperCamelCase_: str ):
        # Intended: set_tgt_lang_special_tokens (same shape as the src variant).
        UpperCamelCase_ =self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            UpperCamelCase_ =[]
            UpperCamelCase_ =[self.eos_token_id, self.cur_lang_code]
        else:
            UpperCamelCase_ =[self.cur_lang_code]
            UpperCamelCase_ =[self.eos_token_id]
| 391 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for the ESM configuration module.
# NOTE(review): both statements below bind the SAME auto-generated name, so the
# logger is immediately clobbered by the dict; later code that reads `logger`
# (e.g. in the config class) references an undefined name.
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
# TODO Update this
# Map of pretrained ESM checkpoints to their hosted config files
# (intended name: ESM_PRETRAINED_CONFIG_ARCHIVE_MAP).
__SCREAMING_SNAKE_CASE : int = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class __lowerCamelCase ( lowerCamelCase_ ):
    """ESM model configuration (auto-generated rename of `EsmConfig`).

    NOTE(review): this block is machine-mangled and cannot run as written:
    - the base class `lowerCamelCase_` is an undefined name (presumably the
      `PretrainedConfig` imported above);
    - every `__init__` parameter shares the name `lowerCamelCase_`, which is a
      SyntaxError (duplicate argument);
    - all locals/attributes collapse onto `_lowerCAmelCase`, so the attribute
      assignments overwrite one another.
    Comments describe the intended behavior recoverable from the code shape.
    """

    a_: Any = """esm"""  # intended: `model_type = "esm"`

    # Intended parameters: vocab_size, mask_token_id, pad_token_id, hidden_size,
    # num_hidden_layers, num_attention_heads, intermediate_size,
    # hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings,
    # initializer_range, layer_norm_eps, position_embedding_type, use_cache,
    # emb_layer_norm_before, token_dropout, is_folding_model, esmfold_config,
    # vocab_list.
    def __init__( self : Dict , lowerCamelCase_ : Any=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=768 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Optional[Any]=3072 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : int=0.1 , lowerCamelCase_ : List[Any]=1026 , lowerCamelCase_ : List[str]=0.02 , lowerCamelCase_ : str=1e-12 , lowerCamelCase_ : int="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Any=False , lowerCamelCase_ : Dict=False , lowerCamelCase_ : Any=None , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : Union[str, Any] , ):
        super().__init__(pad_token_id=lowerCamelCase_ , mask_token_id=lowerCamelCase_ , **lowerCamelCase_ )
        # Core transformer hyperparameters.
        _lowerCAmelCase =vocab_size
        _lowerCAmelCase =hidden_size
        _lowerCAmelCase =num_hidden_layers
        _lowerCAmelCase =num_attention_heads
        _lowerCAmelCase =intermediate_size
        _lowerCAmelCase =hidden_dropout_prob
        _lowerCAmelCase =attention_probs_dropout_prob
        _lowerCAmelCase =max_position_embeddings
        _lowerCAmelCase =initializer_range
        _lowerCAmelCase =layer_norm_eps
        _lowerCAmelCase =position_embedding_type
        _lowerCAmelCase =use_cache
        # ESM-specific switches.
        _lowerCAmelCase =emb_layer_norm_before
        _lowerCAmelCase =token_dropout
        _lowerCAmelCase =is_folding_model
        if is_folding_model:
            # Folding (ESMFold) models additionally carry an EsmFoldConfig and a
            # vocab list; both fall back to defaults when not supplied.
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""" )
                _lowerCAmelCase =EsmFoldConfig()
            elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
                _lowerCAmelCase =EsmFoldConfig(**lowerCamelCase_ )
            _lowerCAmelCase =esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
                _lowerCAmelCase =get_default_vocab_list()
            else:
                _lowerCAmelCase =vocab_list
        else:
            _lowerCAmelCase =None
            _lowerCAmelCase =None
        # HF's ESMFold port never supports use_esm_attn_map.
        if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowerCamelCase_ ):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )

    def lowerCAmelCase__ ( self : Tuple ):
        # Intended: to_dict — serialize, expanding the nested EsmFoldConfig.
        _lowerCAmelCase =super().to_dict()
        if isinstance(self.esmfold_config , lowerCamelCase_ ):
            _lowerCAmelCase =self.esmfold_config.to_dict()
        return output
@dataclass
class __lowerCamelCase :
    """ESMFold head configuration (auto-generated rename of `EsmFoldConfig`).

    BUG FIX: in the original, every field was declared under the single name
    `a_` (so only the last survived) and both methods shared one name (so the
    trunk-normalizing logic was dead and referenced the undefined name
    `lowerCamelCase_`). Field names are restored from the attribute reads in
    the method bodies / upstream ESM docs, the initializer is restored as
    `__post_init__`, and `to_dict` is given its real name with a
    backward-compatible alias for the auto-generated one.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept a plain dict for `trunk` and normalize it to a config object.
        # NOTE(review): upstream also builds a default TrunkConfig() when trunk
        # is None; `TrunkConfig` is not resolvable under this file's
        # auto-generated class names, so None is left as-is here.
        if self.trunk is not None and isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested trunk config."""
        output = asdict(self)
        if self.trunk is not None:
            output["trunk"] = self.trunk.to_dict()
        return output

    # Backward-compatible alias for the auto-generated method name.
    lowerCAmelCase__ = to_dict
@dataclass
class __lowerCamelCase :
    """Folding-trunk configuration (auto-generated rename of `TrunkConfig`).

    BUG FIXES:
    - every field was declared under the single name `a_`, so only the last
      survived and every `self.<field>` read would raise AttributeError; field
      names are restored from the method-body reads / upstream ESM docs;
    - the divisibility checks compared a value to itself (`x % x != 0` is
      always false), so they could never fire; they now compare the state dims
      against the corresponding head widths;
    - both methods shared one name, leaving the validation logic dead; it is
      restored as `__post_init__` so dataclass construction validates.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Accept a plain dict for `structure_module`.
        # NOTE(review): upstream also builds a default StructureModuleConfig()
        # when None; that class name is not resolvable in this mangled file,
        # so None is left as-is.
        if self.structure_module is not None and isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
        # BUG FIX: was `self.sequence_state_dim % self.sequence_state_dim` (never fires).
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f" {self.sequence_state_dim} and {self.sequence_head_width}." )
        # BUG FIX: was `self.pairwise_state_dim % self.pairwise_state_dim` (never fires).
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested structure-module config."""
        output = asdict(self)
        if self.structure_module is not None:
            output["structure_module"] = self.structure_module.to_dict()
        return output

    # Backward-compatible alias for the auto-generated method name.
    lowerCAmelCase__ = to_dict
@dataclass
class __lowerCamelCase :
    """Structure-module hyperparameters (auto-generated rename of
    `StructureModuleConfig`).

    BUG FIX: every field was declared under the single name `a_`, so only the
    last annotation survived and the dataclass effectively had one field.
    Field names are restored from the upstream ESM configuration.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize to a plain dict."""
        return asdict(self)

    # Backward-compatible alias for the auto-generated method name.
    lowerCAmelCase__ = to_dict
def snake_case_ ( ):
    """Return the default ESM-2 vocabulary as a tuple of token strings.

    Order matters: four special tokens, then the amino-acid / ambiguity
    alphabet, then the gap/separator characters and trailing special tokens.
    """
    special_prefix = ("<cls>", "<pad>", "<eos>", "<unk>")
    # Amino acids plus ambiguity codes, in the canonical ESM-2 order.
    residues = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    special_suffix = (".", "-", "<null_1>", "<mask>")
    return special_prefix + residues + special_suffix
| 149 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
    """Unit tests for `BlipProcessor` (auto-generated rename of the Blip
    processor test case).

    NOTE(review): this block is machine-mangled — every test method shares the
    single name `lowerCAmelCase__`, so unittest would only ever see (and run)
    the last one, and several bodies read the undefined name `lowerCamelCase_`
    where a loop variable or argument was intended. The comments name the
    intended test per method.
    """

    def lowerCAmelCase__ ( self : str ):
        # Intended: setUp — build a tmp dir with a saved BlipProcessor
        # (BlipImageProcessor + tiny random BertTokenizer).
        _lowerCAmelCase =tempfile.mkdtemp()
        _lowerCAmelCase =BlipImageProcessor()
        _lowerCAmelCase =BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
        _lowerCAmelCase =BlipProcessor(lowerCamelCase_ , lowerCamelCase_ )
        processor.save_pretrained(self.tmpdirname )

    def lowerCAmelCase__ ( self : Dict , **lowerCamelCase_ : Dict ):
        # Intended: get_tokenizer helper.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).tokenizer

    def lowerCAmelCase__ ( self : Union[str, Any] , **lowerCamelCase_ : Tuple ):
        # Intended: get_image_processor helper.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).image_processor

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        # Intended: tearDown — remove the tmp dir.
        shutil.rmtree(self.tmpdirname )

    def lowerCAmelCase__ ( self : Tuple ):
        # Intended: prepare_image_inputs — one random RGB PIL image.
        _lowerCAmelCase =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        _lowerCAmelCase =[Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def lowerCAmelCase__ ( self : Optional[int] ):
        # Intended: test_save_load_pretrained_additional_features — reload with
        # overridden kwargs and check tokenizer/image-processor round-trip.
        _lowerCAmelCase =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _lowerCAmelCase =self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        _lowerCAmelCase =self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
        _lowerCAmelCase =BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowerCamelCase_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase_ )

    def lowerCAmelCase__ ( self : Tuple ):
        # Intended: test_image_processor — processor(images=...) matches the
        # bare image processor output.
        _lowerCAmelCase =self.get_image_processor()
        _lowerCAmelCase =self.get_tokenizer()
        _lowerCAmelCase =BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _lowerCAmelCase =self.prepare_image_inputs()
        _lowerCAmelCase =image_processor(lowerCamelCase_ , return_tensors="""np""" )
        _lowerCAmelCase =processor(images=lowerCamelCase_ , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def lowerCAmelCase__ ( self : List[str] ):
        # Intended: test_tokenizer — processor(text=...) matches the bare tokenizer.
        _lowerCAmelCase =self.get_image_processor()
        _lowerCAmelCase =self.get_tokenizer()
        _lowerCAmelCase =BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _lowerCAmelCase ="""lower newer"""
        _lowerCAmelCase =processor(text=lowerCamelCase_ )
        _lowerCAmelCase =tokenizer(lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def lowerCAmelCase__ ( self : List[str] ):
        # Intended: test_processor — combined text+image call yields the expected
        # keys and an empty call raises.
        _lowerCAmelCase =self.get_image_processor()
        _lowerCAmelCase =self.get_tokenizer()
        _lowerCAmelCase =BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _lowerCAmelCase ="""lower newer"""
        _lowerCAmelCase =self.prepare_image_inputs()
        _lowerCAmelCase =processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(lowerCamelCase_ ):
            processor()

    def lowerCAmelCase__ ( self : Optional[int] ):
        # Intended: test_tokenizer_decode — batch_decode delegates to the tokenizer.
        _lowerCAmelCase =self.get_image_processor()
        _lowerCAmelCase =self.get_tokenizer()
        _lowerCAmelCase =BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _lowerCAmelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _lowerCAmelCase =processor.batch_decode(lowerCamelCase_ )
        _lowerCAmelCase =tokenizer.batch_decode(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )

    def lowerCAmelCase__ ( self : Tuple ):
        # Intended: test_model_input_names — processor output key set.
        _lowerCAmelCase =self.get_image_processor()
        _lowerCAmelCase =self.get_tokenizer()
        _lowerCAmelCase =BlipProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
        _lowerCAmelCase ="""lower newer"""
        _lowerCAmelCase =self.prepare_image_inputs()
        _lowerCAmelCase =processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 149 | 1 |
"""Doc-notebook bootstrap constants (Korean docs build).

Defines the install cell injected at the top of auto-generated notebooks and
the placeholder substitutions used in documentation code samples.
"""
# First code cell of generated notebooks: installs transformers/datasets
# (the commented lines switch to a from-source install).
UpperCamelCase__: List[Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# Notebook cell list prepended to every converted doc page.
UpperCamelCase__: str = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions applied to doc examples.
UpperCamelCase__: Optional[int] = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 127 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def UpperCAmelCase_ ( result, args ):
    """Compute and log WER/CER for an evaluated dataset.

    BUG FIX: the original declared both parameters under the same name
    (`__UpperCamelCase`, a SyntaxError) and collapsed every local onto
    `SCREAMING_SNAKE_CASE__`, while the body read `args`, `result`, `wer`,
    `cer`, etc. Parameter and local names are restored from those reads.

    Args:
        result: mapped dataset with "target" and "prediction" columns.
        args: parsed CLI namespace (uses log_outputs, dataset, config, split).
    """
    log_outputs = args.log_outputs
    dataset_id = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
    # load metric
    wer = load_metric("""wer""" )
    cer = load_metric("""cer""" )
    # compute metrics
    wer_result = wer.compute(references=result["""target"""], predictions=result["""prediction"""] )
    cer_result = cer.compute(references=result["""target"""], predictions=result["""prediction"""] )
    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str )
    with open(f"""{dataset_id}_eval_results.txt""", """w""" ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""
        with open(pred_file, """w""" ) as p, open(target_file, """w""" ) as t:
            # mapping function to write output
            def write_to_file(batch, i ):
                p.write(f"""{i}""" + """\n""" )
                p.write(batch["""prediction"""] + """\n""" )
                t.write(f"""{i}""" + """\n""" )
                t.write(batch["""target"""] + """\n""" )
            result.map(write_to_file, with_indices=True )
def UpperCAmelCase_ ( __UpperCamelCase ):
    """Normalize a target transcription for WER/CER scoring.

    Lowercases, strips the training-time ignored punctuation characters, and
    collapses the listed whitespace token sequences to single spaces.

    BUG FIX: the original collapsed every local onto one name while the body
    read the undefined names `text` and `token_sequences_to_ignore`; locals
    are restored so the function actually transforms its input.

    Args:
        __UpperCamelCase: raw transcription string.

    Returns:
        The normalized string.
    """
    text = __UpperCamelCase
    chars_to_ignore_regex = """[,?.!\-\;\:\"“%‘”�—’…–]"""  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, """""", text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["""\n\n""", """\n""", """ """, """ """]
    for t in token_sequences_to_ignore:
        text = """ """.join(text.split(t ) )
    return text
def UpperCAmelCase_ ( __UpperCamelCase ):
    """Evaluate an ASR checkpoint on a dataset (intended name: `main(args)`).

    NOTE(review): machine-mangled — every local collapses onto
    `SCREAMING_SNAKE_CASE__` while the body reads `args`, `feature_extractor`,
    `dataset`, `asr`, `batch`, `prediction`, and calls `normalize_text` /
    `log_results`, none of which resolve as written. Comments describe intent.
    """
    # load dataset
    SCREAMING_SNAKE_CASE__ =load_dataset(args.dataset, args.config, split=args.split, use_auth_token=__UpperCamelCase )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    SCREAMING_SNAKE_CASE__ =AutoFeatureExtractor.from_pretrained(args.model_id )
    SCREAMING_SNAKE_CASE__ =feature_extractor.sampling_rate
    # resample audio
    SCREAMING_SNAKE_CASE__ =dataset.cast_column("""audio""", Audio(sampling_rate=__UpperCamelCase ) )
    # load eval pipeline
    if args.device is None:
        # default to the first GPU if available, else CPU (-1)
        SCREAMING_SNAKE_CASE__ =0 if torch.cuda.is_available() else -1
    SCREAMING_SNAKE_CASE__ =pipeline("""automatic-speech-recognition""", model=args.model_id, device=args.device )
    # map function to decode audio
    def map_to_pred(__UpperCamelCase ):
        # run the ASR pipeline on one example and attach normalized target text
        SCREAMING_SNAKE_CASE__ =asr(
            batch["""audio"""]["""array"""], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s )
        SCREAMING_SNAKE_CASE__ =prediction["""text"""]
        SCREAMING_SNAKE_CASE__ =normalize_text(batch["""sentence"""] )
        return batch
    # run inference on all examples
    SCREAMING_SNAKE_CASE__ =dataset.map(__UpperCamelCase, remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(__UpperCamelCase, __UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point for the ASR evaluation script.
    # BUG FIX: the parser was bound to an unused auto-generated name while the
    # `add_argument` calls read an undefined `parser`, and the parsed namespace
    # was likewise never bound to the `args` that the final call reads.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()
    # `main` was auto-renamed to `UpperCAmelCase_` in this file; the last
    # definition of that name (directly above) is the evaluation entry point.
    UpperCAmelCase_(args)
| 151 | 0 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def SCREAMING_SNAKE_CASE__ ( masked_input, model, tokenizer, topk=5 ):
    """Fill the single `<mask>` in `masked_input` with the model's top-k predictions.

    Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py

    BUG FIX: the original declared all four parameters under the same name
    (`__A`, a SyntaxError) and collapsed every local onto `_snake_case` while
    the body read `masked_index`, `prob`, `values`, `indices`, etc. Names are
    restored from those body reads (intended name: `fill_mask`).

    Args:
        masked_input: input string containing exactly one `<mask>` token.
        model: masked-LM returning logits as the first element of its output tuple.
        tokenizer: tokenizer exposing encode/convert_ids_to_tokens/mask_token(_id).
        topk: number of candidate fills to return.

    Returns:
        List of (filled_text, probability, predicted_token) tuples.
    """
    assert masked_input.count('<mask>' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    # Locate the mask position in the (single) sequence.
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values, indices = prob.topk(k=topk, dim=0 )
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
        # SentencePiece marks word starts with '\u2581'; map it back to a space.
        predicted_token = predicted_token_bpe.replace('\u2581', ' ' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token ), predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
# Demo driver: load CamemBERT and print the top-3 fills for a masked sentence.
# NOTE(review): machine-mangled — both the tokenizer and the model are bound to
# the SAME name `lowercase` (each binding clobbers the previous one), and the
# final line calls `fill_mask`, which was auto-renamed to
# `SCREAMING_SNAKE_CASE__` above, so this driver cannot run as written.
lowercase : str = CamembertTokenizer.from_pretrained("camembert-base")
lowercase : str = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
lowercase : Tuple = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 542 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# Module logger and pretrained-file metadata for the BlenderbotSmall fast tokenizer.
# NOTE(review): all four bindings below share the auto-generated name
# `lowercase`, so each assignment clobbers the previous one; the intended names
# are logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES (referenced by the class below).
lowercase : Optional[Any] = logging.get_logger(__name__)
# Local filenames for saved vocabulary artifacts.
lowercase : str = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
# Hosted locations of those artifacts for each pretrained checkpoint.
lowercase : Optional[Any] = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}
# Maximum input length per checkpoint.
lowercase : List[Any] = {
    "facebook/blenderbot_small-90M": 512,
}
class __UpperCAmelCase ( _lowerCamelCase ):
    """Fast BlenderbotSmall tokenizer (auto-generated rename of
    `BlenderbotSmallTokenizerFast`).

    NOTE(review): machine-mangled and non-runnable as written — the base class
    `_lowerCamelCase` is undefined (presumably `PreTrainedTokenizerFast`
    imported above), `__init__` repeats the parameter name `lowerCAmelCase_`
    (a SyntaxError), both instance methods share the name `lowerCamelCase`
    (only the last survives), and the pair-handling bodies reuse `token_ids_a`
    for both sequences. Comments describe the intended behavior.
    """

    __lowercase = VOCAB_FILES_NAMES
    __lowercase = PRETRAINED_VOCAB_FILES_MAP
    __lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowercase = BlenderbotSmallTokenizer

    # Intended parameters: vocab_file, merges_file, unk/bos/eos tokens,
    # add_prefix_space, trim_offsets.
    def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , lowerCAmelCase_=True , **lowerCAmelCase_ , ):
        # Build the backing byte-level BPE tokenizer and hand it to the fast base class.
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=lowerCAmelCase_ , merges=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , ) , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
        _snake_case = add_prefix_space

    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ):
        # Intended: build_inputs_with_special_tokens — [BOS] seq0 [EOS] (+ seq1 [EOS]).
        _snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
        # Intended: create_token_type_ids_from_sequences — all-zero segment ids.
        _snake_case = [self.sep_token_id]
        _snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 542 | 1 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
# Module logger for the WavLM checkpoint-conversion script.
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
# fairseq parameter-name prefixes -> HF WavLM module paths; a '*' is later
# replaced with the encoder layer index
# (intended name: MAPPING).
lowercase__ : int = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
    '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
    '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''ctc_proj''',
    '''mask_emb''': '''masked_spec_embed''',
}
# HF parameter names that live at the top level of the model rather than inside
# an encoder layer (intended name: TOP_LEVEL_KEYS).
lowercase__ : Dict = [
    '''ctc_proj''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def _lowerCAmelCase ( hf_pointer, key, value, full_name, weight_type ):
    """Copy a fairseq tensor into the HF model at the dot-separated path `key`
    (intended name: `set_recursively`).

    BUG FIXES: the original declared all five parameters under the same name
    (`__snake_case`, a SyntaxError), and every weight_type branch assigned
    `value` to a throwaway local (`__A = value`) instead of writing into the
    model; the writes now target the matching tensor's `.data`.

    Args:
        hf_pointer: module/parameter to start the attribute walk from.
        key: dot-separated attribute path to the target submodule.
        value: source tensor whose shape must match the destination.
        full_name: original fairseq parameter name (for messages only).
        weight_type: one of "weight", "weight_g", "weight_v", "bias" or None.
    """
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every weight of `fairseq_model` into `hf_model` using MAPPING.

    Conv feature-extractor weights are dispatched to `load_conv_layer`; all
    other weights are routed through `set_recursively`. Unmatched fairseq
    parameters are collected and reported with a warning.

    The original had both parameters named `__snake_case` (a SyntaxError) and
    assigned the mapped names/types to dead locals.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits two dots before the matched key.
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor weight into the HF feature extractor.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>.<param>";
    type_id 0 is the conv itself, type_id 2 is the layer norm (only valid for
    layer 0 under group norm, or every layer otherwise). Anything else is
    recorded in `unused_weights`.

    The original had all five parameters named `__snake_case` (a SyntaxError)
    and assigned `value` to dead locals instead of the conv/norm parameters;
    the assert messages also indexed `feature_extractor[layer_id]`, which is
    not subscriptable.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load a fairseq WavLM checkpoint, port its weights, and save an HF model.

    Args:
        checkpoint_path: path to the original fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the HF model.
        config_path: optional path to an HF config.json; defaults to a fresh
            WavLMConfig when omitted.

    The original stored every intermediate in a throwaway local (`__A`) so the
    loaded model/config were never actually used; names restored to match the
    `convert_wavlm_checkpoint(...)` call in the __main__ guard.
    """
    # Load the pre-trained fairseq checkpoint and rebuild the original model.
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['cfg'])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['model'])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the WavLM fairseq -> HF conversion.
    # (The original bound the parser and parsed args to throwaway names, so
    # `parser` / `args` below were undefined; also stripped a stray table
    # artifact from the last line.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
def xnor_gate(input_a, input_b):
    """XNOR logic gate: return 1 when both inputs are equal, else 0.

    The original declared two parameters both named `lowercase` (a SyntaxError)
    and compared `input_a == input_a`, which is always true; the name is
    restored to match the `xnor_gate(...)` call sites below.
    """
    return 1 if input_a == input_b else 0
def test_xnor_gate():
    """Exercise the full XNOR truth table.

    Renamed from `lowerCamelCase__`, which shadowed (and erased) the gate
    function defined immediately above under the same name.
    """
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
    # Smoke test: print the XNOR truth table (expected output: 1, 0, 0, 1).
    # NOTE(review): `xnor_gate` must exist at module level; the gate above is
    # currently defined under a different (garbled) name — confirm the binding.
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 62 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class snake_case__(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for `IFInpaintingPipeline`.

    Restored from a garbled copy: the class previously inherited the undefined
    name `_UpperCamelCase` twice (a duplicate-base TypeError), all four class
    attributes were bound to the single name `lowercase_`, and every method was
    named `snake_case` so only the last survived. Names below match what the
    pipeline-tester mixins and unittest discovery expect.
    """

    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Delegates to the IF mixin's shared component factory.
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-specific generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 81 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; the config class below calls `logger.info(...)`, but the
# original bound this to `lowerCAmelCase__`, leaving `logger` undefined.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__(PretrainedConfig):
    """Configuration for a Deformable DETR model.

    Restored from a garbled copy: the base class was the undefined name
    `_UpperCamelCase` (PretrainedConfig is imported above), every __init__
    parameter shared the single name `SCREAMING_SNAKE_CASE` (a SyntaxError),
    all `self.<attr> = ...` assignments had lost their targets, and the
    `model_type`/`attribute_map` class attributes plus the property/`to_dict`
    names (which the PretrainedConfig machinery relies on) had collided.
    Parameter names/defaults follow the upstream DeformableDetrConfig.
    """

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1_024,
        encoder_layers=6,
        encoder_ffn_dim=1_024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1_024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,  # assumed from upstream signature — TODO confirm
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        # Alias expected via `attribute_map`.
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        # Alias expected via `attribute_map`.
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 81 | 1 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_lowerCAmelCase = """base_with_context"""
# Intended as `load_notes_encoder(weights, model)`: copy T5X note-encoder
# weights into the torch SpectrogramNotesEncoder and return the model.
# NOTE(review): both parameters are named `_lowerCAmelCase` — duplicate
# argument names are a SyntaxError, and the bodies below reference the lost
# originals (`weights`, `model`).
# NOTE(review): every `A_ : ... = nn.Parameter(...)` assigns a throwaway
# local; the original targets (e.g. `model.token_embedder.weight`,
# `lyr.layer[...].*`) were stripped, so nothing is actually copied — restore
# them from the upstream diffusers conversion script before use.
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Any = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
    A_ : Union[str, Any] = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=_lowerCAmelCase )
    for lyr_num, lyr in enumerate(model.encoders ):
        A_ : Any = weights[f"""layers_{lyr_num}"""]
        A_ : List[str] = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
        A_ : Dict = ly_weight["""attention"""]
        A_ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
        A_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
        A_ : int = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
        A_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        A_ : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
        A_ : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
        A_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
        A_ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
    A_ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
    return model
# Intended as `load_continuous_encoder(weights, model)`: copy T5X continuous
# (spectrogram-context) encoder weights into SpectrogramContEncoder.
# NOTE(review): duplicate `_lowerCAmelCase` parameters are a SyntaxError and
# the bodies reference the lost names `weights`/`model`; all `A_` assignments
# below have lost their module-attribute targets — see the note on the
# notes-encoder loader above this function's twin defect.
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Dict = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
    A_ : List[str] = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=_lowerCAmelCase )
    for lyr_num, lyr in enumerate(model.encoders ):
        A_ : str = weights[f"""layers_{lyr_num}"""]
        A_ : Optional[Any] = ly_weight["""attention"""]
        A_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
        A_ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
        A_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
        A_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        A_ : List[str] = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
        A_ : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
        A_ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
        A_ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
        A_ : str = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
    A_ : List[str] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
    return model
# Intended as `load_decoder(weights, model)`: copy T5X FiLM decoder weights
# (time embedding, self/cross attention, FiLM layers, MLP, output dense)
# into the torch TaFilmDecoder.
# NOTE(review): duplicate `_lowerCAmelCase` parameters are a SyntaxError;
# bodies reference the lost names `weights`/`model`, and every `A_` binding
# below has lost its module-attribute target — the function currently copies
# nothing. Restore targets from the upstream diffusers conversion script.
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
    A_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
    A_ : Optional[int] = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) ,requires_grad=_lowerCAmelCase )
    A_ : Any = nn.Parameter(
        torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        A_ : List[Any] = weights[f"""layers_{lyr_num}"""]
        A_ : Any = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
        A_ : List[Any] = nn.Parameter(
            torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
        A_ : int = ly_weight["""self_attention"""]
        A_ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
        A_ : int = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
        A_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
        A_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        A_ : int = ly_weight["""MultiHeadDotProductAttention_0"""]
        A_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
        A_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
        A_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
        A_ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        A_ : List[Any] = nn.Parameter(
            torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
        A_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
        A_ : List[str] = nn.Parameter(
            torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
        A_ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
        A_ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
        A_ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
    A_ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
    A_ : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
    return model
# Intended as `main(args)`: load a T5X music-spectrogram-diffusion checkpoint,
# build the three torch sub-models, port the weights, assemble a
# SpectrogramDiffusionPipeline, and optionally save it.
# NOTE(review): the parameter shadows the function name and the body reads the
# global `args` instead; locals (`ta_checkpoint`, `gin_overrides`, `gin_file`,
# `gin_config`, `synth_model`, `scheduler`, the encoders/decoder, `melgan`,
# `pipe`) have all been collapsed to the dead name `A_`, so the later calls
# reference undefined names. `load_notes_encoder` / `load_continuous_encoder` /
# `load_decoder` are also not defined under those names above — confirm.
def _lowerCAmelCase ( _lowerCAmelCase ):
    '''simple docstring'''
    A_ : Union[str, Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    A_ : int = jnp.tree_util.tree_map(onp.array ,_lowerCAmelCase )
    A_ : Dict = [
        """from __gin__ import dynamic_registration""",
        """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
        """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
        """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
    ]
    A_ : int = os.path.join(args.checkpoint_path ,"""..""" ,"""config.gin""" )
    A_ : int = inference.parse_training_gin_file(_lowerCAmelCase ,_lowerCAmelCase )
    A_ : Dict = inference.InferenceModel(args.checkpoint_path ,_lowerCAmelCase )
    A_ : str = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ,variance_type="""fixed_large""" )
    A_ : Dict = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["""inputs"""] ,vocab_size=synth_model.model.module.config.vocab_size ,d_model=synth_model.model.module.config.emb_dim ,dropout_rate=synth_model.model.module.config.dropout_rate ,num_layers=synth_model.model.module.config.num_encoder_layers ,num_heads=synth_model.model.module.config.num_heads ,d_kv=synth_model.model.module.config.head_dim ,d_ff=synth_model.model.module.config.mlp_dim ,feed_forward_proj="""gated-gelu""" ,)
    A_ : Optional[Any] = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims ,targets_context_length=synth_model.sequence_length["""targets_context"""] ,d_model=synth_model.model.module.config.emb_dim ,dropout_rate=synth_model.model.module.config.dropout_rate ,num_layers=synth_model.model.module.config.num_encoder_layers ,num_heads=synth_model.model.module.config.num_heads ,d_kv=synth_model.model.module.config.head_dim ,d_ff=synth_model.model.module.config.mlp_dim ,feed_forward_proj="""gated-gelu""" ,)
    A_ : Dict = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims ,targets_length=synth_model.sequence_length["""targets_context"""] ,max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time ,d_model=synth_model.model.module.config.emb_dim ,num_layers=synth_model.model.module.config.num_decoder_layers ,num_heads=synth_model.model.module.config.num_heads ,d_kv=synth_model.model.module.config.head_dim ,d_ff=synth_model.model.module.config.mlp_dim ,dropout_rate=synth_model.model.module.config.dropout_rate ,)
    A_ : Tuple = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] ,_lowerCAmelCase )
    A_ : int = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] ,_lowerCAmelCase )
    A_ : Optional[int] = load_decoder(ta_checkpoint["""target"""]["""decoder"""] ,_lowerCAmelCase )
    A_ : int = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
    A_ : Optional[int] = SpectrogramDiffusionPipeline(
        notes_encoder=_lowerCAmelCase ,continuous_encoder=_lowerCAmelCase ,decoder=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,melgan=_lowerCAmelCase ,)
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # CLI entry point for the T5X music-spectrogram-diffusion conversion.
    # (The original bound the parser and parsed args to throwaway names, so
    # the `parser.add_argument(...)` calls and `main(args)` referenced
    # undefined names.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()
    main(args)
| 569 |
import os
from math import logaa
def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-indexed line of `data_file` with the greatest base**exponent.

    Each line holds "base,exponent"; values are compared via
    exponent * log10(base) so the huge powers are never materialized
    (Project Euler problem 99).

    The original shadowed its own name with the parameter, compared against an
    unbound `largest`, and passed the data-file name to `os.path.dirname`
    instead of `__file__`.
    """
    # The top-of-file `from math import logaa` is a garbled `log10` import;
    # import the real name locally so this function is self-contained.
    from math import log10

    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
    # Print the answer (the 1-indexed line number of the largest pair).
    # NOTE(review): `solution` must be defined above; the function in this
    # file currently carries a garbled name — confirm the binding.
    print(solution())
| 569 | 1 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    """Pretty-print the shortest distance from `src` to every vertex.

    The original declared both parameters as `a_` (duplicate argument names
    are a SyntaxError); the name is restored to match the call in __main__.
    """
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if relaxing any edge still improves `distance`.

    After |V|-1 Bellman-Ford passes, a further improvable edge proves a
    negative-weight cycle. The original declared all three parameters as `a_`
    (a SyntaxError) and bound the unpacked edge to a single dead local.
    """
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Single-source shortest paths with negative edge weights (Bellman-Ford).

    Args:
        graph: edges as {"src": u, "dst": v, "weight": w} dicts.
        vertex_count: number of vertices |V|.
        edge_count: number of edges |E|.
        src: source vertex.

    Returns:
        Distance from `src` to every vertex.

    Raises:
        Exception: if a negative-weight cycle is reachable.

    The original declared all four parameters as `a_` (a SyntaxError) and had
    lost the `distance[src] = 0.0` / `distance[v] = ...` targets.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge |V|-1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    if check_negative_cycle(graph, distance, edge_count):
        raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive driver: read a graph from stdin and print shortest distances.
    # (The original collapsed every binding — V, E, graph, the edge fields,
    # source, and the result — into the single name `UpperCamelCase`.)
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 543 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
UpperCamelCase =re.compile(R"([A-Z]+)([A-Z][a-z])")
UpperCamelCase =re.compile(R"([a-z\d])([A-Z])")
UpperCamelCase =re.compile(R"(?<!_)_(?!_)")
UpperCamelCase =re.compile(R"(_{2,})")
UpperCamelCase =R"^\w+(\.\w+)*$"
UpperCamelCase =R"<>:/\|?*"
def camelcase_to_snakecase(name: str) -> str:
    """Convert "CamelCase" to "camel_case".

    The original applied the second regex to the *original* input, discarding
    the first substitution, and was named `snake_case`, colliding with its
    sibling helpers; the name is restored to match the call sites below.
    """
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name: str) -> str:
    """Convert "snake_case" to "SnakeCase", preserving double-underscore groups.

    Renamed from the colliding `snake_case`; intermediate results were
    previously bound to dead locals.
    """
    parts = _single_underscore_re.split(name)
    parts = [_multiple_underscores_re.split(n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")
def filename_prefix_for_name(name: str) -> str:
    """Return the snake_case file prefix for a dataset name.

    Raises:
        ValueError: if `name` contains path separators (must be a bare name).
    """
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name: str, split: str) -> str:
    """Return "<dataset_prefix>-<split>" for a dataset name and split name.

    The original declared both parameters as `a_` (a SyntaxError).

    Raises:
        ValueError: if `name` is a path, or `split` does not match `_split_re`.
    """
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name: str, split: str, data_dir: str, filetype_suffix: str = None) -> str:
    """Return a glob pattern matching all shard files of one dataset split.

    The original declared all parameters as `a_` (a SyntaxError) and bound the
    prefix to a dead local before appending the suffix.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the concrete shard filenames for one dataset split.

    With `shard_lengths`, produces "<prefix>-00000-of-000NN[.suffix]" names;
    otherwise a single "<prefix>[.suffix]" name. The original declared all
    five parameters as `a_` (a SyntaxError) and collapsed every intermediate
    into a dead local.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 543 | 1 |
def lowercase(_a) -> int:
    """Return the number of set bits in a non-negative integer (popcount).

    Uses Kernighan's trick: `n &= n - 1` clears the lowest set bit, so the
    loop runs once per 1-bit rather than once per bit position.

    Raises:
        ValueError: if `_a` is not a non-negative integer.

    Bug fixed: the original tested `isinstance(_a, _a)`, which raises
    TypeError for any int input instead of validating the type.
    """
    number = _a
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
    # Run module doctests (removed a stray "| 137 |" table artifact that made
    # the last line a SyntaxError).
    import doctest

    doctest.testmod()
import argparse
import os
import re
import packaging.version
# Release tooling configuration.
# (The original bound all four constants to the single name `_lowerCAmelCase`,
# leaving REPLACE_PATTERNS / REPLACE_FILES / README_FILE undefined for the
# functions below.)
PATH_TO_EXAMPLES = "examples/"
# pattern-name -> (regex locating the version string, replacement template)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern-name -> file it applies to
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in one file using a REPLACE_PATTERNS entry.

    The original declared all three parameters as `_a` (a SyntaxError) and
    wrote the *original* file contents back instead of the substituted text.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned minimum version in every example script.

    The original walked the *version argument* instead of PATH_TO_EXAMPLES and
    joined the wrong components when building each file path.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (plus examples unless a patch).

    The original declared both parameters as `_a` (a SyntaxError).
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace `main`-branch doc links with release links in the README model list.

    The original read from an undefined file handle target and collapsed the
    line buffer / indices into dead locals, so nothing was rewritten.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    # Update the lines in the model list.
    index = start_index
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the package __init__ and parse it."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps: compute the target version,
    confirm it interactively, and update it everywhere.

    The original collapsed `default_version` / `version` into dead locals and
    passed the wrong values through to `global_version_update`.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps: bump to the next dev version."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI entry point for the release helper.
    # (The original bound the parser and args to throwaway names, and the
    # functions it calls carried colliding garbled names; a trailing table
    # artifact was also removed from the last line.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase_ :
    """Builds a tiny ConvBert config plus random dummy inputs for the TF tests.

    NOTE(review): this class is machine-mangled.  Every assignment target was
    rewritten to the throwaway local `_A` (so `self.parent`, `self.batch_size`,
    etc. are never actually set even though later methods read them), and all
    check-methods share the single name `lowerCAmelCase_`, so each `def`
    shadows the previous one.  The code is left byte-identical here; restore
    from upstream `tests/models/convbert/test_modeling_tf_convbert.py`.
    """

    def __init__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[str]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Optional[Any]=512 , _UpperCAmelCase : Dict=16 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Any=None , ):
        # NOTE(review): intended targets are self.parent, self.batch_size, ...
        # (the constructor parameters are also ignored — values are hard-coded).
        _A = parent
        _A = 13
        _A = 7
        _A = True
        _A = True
        _A = True
        _A = True
        _A = 99
        _A = 384
        _A = 2
        _A = 4
        _A = 37
        _A = 'gelu'
        _A = 0.1
        _A = 0.1
        _A = 512
        _A = 16
        _A = 2
        _A = 0.02
        _A = 3
        _A = 4
        _A = 128
        _A = 2
        _A = 9
        _A = 1
        _A = None

    # Build (config, input_ids, token_type_ids, input_mask, labels...) fixtures.
    def lowerCAmelCase_ ( self : Optional[Any] ):
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A = None
        if self.use_input_mask:
            _A = random_attention_mask([self.batch_size, self.seq_length] )
        _A = None
        if self.use_token_type_ids:
            _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _A = None
        _A = None
        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _A = ids_tensor([self.batch_size] , self.num_choices )
        _A = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Exercises the bare TFConvBertModel and checks the hidden-state shape.
    def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ):
        _A = TFConvBertModel(config=_UpperCAmelCase )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        _A = [input_ids, input_mask]
        _A = model(_UpperCAmelCase )
        _A = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Exercises TFConvBertForMaskedLM and checks the vocab-sized logits.
    def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] ):
        _A = TFConvBertForMaskedLM(config=_UpperCAmelCase )
        _A = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        _A = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Exercises TFConvBertForSequenceClassification.
    def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : int ):
        _A = self.num_labels
        _A = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
        _A = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        _A = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Exercises TFConvBertForMultipleChoice (inputs tiled per choice).
    def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ):
        _A = self.num_choices
        _A = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
        _A = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        _A = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Exercises TFConvBertForTokenClassification.
    def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] ):
        _A = self.num_labels
        _A = TFConvBertForTokenClassification(config=_UpperCAmelCase )
        _A = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        _A = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Exercises TFConvBertForQuestionAnswering (start/end logits).
    def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ):
        _A = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
        _A = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        _A = model(_UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Repackage the fixtures as the (config, inputs_dict) pair the mixin expects.
    def lowerCAmelCase_ ( self : Dict ):
        _A = self.prepare_config_and_inputs()
        (
            (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) ,
        ) = config_and_inputs
        _A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Common + pipeline test-suite entry point for the TF ConvBert models.

    NOTE(review): name-mangled block.  The two mixin bases were both rewritten
    to `__lowerCAmelCase` (upstream: TFModelTesterMixin, PipelineTesterMixin),
    the class attributes `all_model_classes`/`pipeline_model_mapping`/
    `test_*` flags were all renamed `UpperCAmelCase`, every test method shares
    the name `lowerCAmelCase_` (so each shadows the previous), and locals were
    collapsed to `_A`.  Code left byte-identical; restore from upstream.
    """

    UpperCAmelCase : Optional[Any] = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    UpperCAmelCase : Dict = (
        {
            '''feature-extraction''': TFConvBertModel,
            '''fill-mask''': TFConvBertForMaskedLM,
            '''question-answering''': TFConvBertForQuestionAnswering,
            '''text-classification''': TFConvBertForSequenceClassification,
            '''token-classification''': TFConvBertForTokenClassification,
            '''zero-shot''': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase : List[Any] = False
    UpperCAmelCase : List[Any] = False
    UpperCAmelCase : Optional[int] = False

    def lowerCAmelCase_ ( self : Optional[Any] ):
        _A = TFConvBertModelTester(self )
        _A = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )

    def lowerCAmelCase_ ( self : Optional[int] ):
        self.config_tester.run_common_tests()

    def lowerCAmelCase_ ( self : int ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def lowerCAmelCase_ ( self : str ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )

    def lowerCAmelCase_ ( self : Dict ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )

    def lowerCAmelCase_ ( self : Tuple ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )

    def lowerCAmelCase_ ( self : List[str] ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )

    def lowerCAmelCase_ ( self : str ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )

    # Saved-model round-trip: save with saved_model=True, reload with Keras and
    # compare the hidden-state / attention output shapes against the config.
    @slow
    def lowerCAmelCase_ ( self : List[str] ):
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()
        _A = True
        _A = True
        if hasattr(_UpperCAmelCase , 'use_cache' ):
            _A = True
        _A = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        _A = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
        for model_class in self.all_model_classes:
            _A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
            _A = model_class(_UpperCAmelCase )
            _A = len(model(_UpperCAmelCase ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
                _A = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
                _A = tf.keras.models.load_model(_UpperCAmelCase )
                _A = model(_UpperCAmelCase )
                if self.is_encoder_decoder:
                    _A = outputs['encoder_hidden_states']
                    _A = outputs['encoder_attentions']
                else:
                    _A = outputs['hidden_states']
                    _A = outputs['attentions']
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                _A = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def lowerCAmelCase_ ( self : int ):
        _A = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        self.assertIsNotNone(_UpperCAmelCase )

    # Checks attention outputs are exposed (and shaped correctly) whether
    # requested via the call kwargs or via the config.
    def lowerCAmelCase_ ( self : Any ):
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()
        _A = True
        _A = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
        _A = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        _A = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
        _A = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )

        def check_decoder_attentions_output(_UpperCAmelCase : Tuple ):
            _A = len(_UpperCAmelCase )
            self.assertEqual(out_len % 2 , 0 )
            _A = outputs.decoder_attentions
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(_UpperCAmelCase : Tuple ):
            _A = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            _A = True
            _A = False
            _A = model_class(_UpperCAmelCase )
            _A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            _A = len(_UpperCAmelCase )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            if self.is_encoder_decoder:
                _A = model_class(_UpperCAmelCase )
                _A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
                self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
                check_decoder_attentions_output(_UpperCAmelCase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            _A = True
            _A = model_class(_UpperCAmelCase )
            _A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            # Check attention is always last and order is fine
            _A = True
            _A = True
            _A = model_class(_UpperCAmelCase )
            _A = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
            self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class lowercase_ ( unittest.TestCase ):
    """Slow integration test: run the pretrained ``YituTech/conv-bert-base``
    checkpoint on a fixed 6-token input and compare the first 3x3 slice of the
    hidden states to hard-coded expected values (atol=1e-4).

    NOTE(review): locals are mangled to `_A` here — `output` and the expected
    tensor read below are never actually bound under those names.
    """

    @slow
    def lowerCAmelCase_ ( self : Optional[Any] ):
        _A = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        _A = tf.constant([[0, 1, 2, 3, 4, 5]] )
        _A = model(_UpperCAmelCase )[0]
        # Expected output shape: (batch=1, seq_len=6, hidden=768).
        _A = [1, 6, 768]
        self.assertEqual(output.shape , _UpperCAmelCase )
        _A = tf.constant(
            [
                [
                    [-0.0347_5493, -0.468_6034, -0.3063_8832],
                    [0.2263_7248, -0.2698_8646, -0.742_3424],
                    [0.1032_4868, -0.4501_3508, -0.5828_0784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
| 505 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( _snake_case : list[int] ) -> list[int]:
'''simple docstring'''
if len(_snake_case ) == 0:
return array
_A , _A = min(_snake_case ), max(_snake_case )
# Compute the variables
_A = _max - _min + 1
_A , _A = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
_A = i - _min
_A = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
_A = 0
for i in range(_snake_case ):
while holes_repeat[i] > 0:
_A = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original referenced undefined names (`user_input`, `unsorted`,
    # `pigeon_sort`); bind the parsed input and call the sort defined above.
    user_input = input('''Enter numbers separated by comma:\n''')
    unsorted = [int(x) for x in user_input.split(''',''')]
    print(_snake_case(unsorted))
| 505 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCamelCase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet loader.

    NOTE(review): the three fields below were all mangled to the same name
    `lowerCamelCase_` (upstream: ``batch_size``, ``columns``, ``features``),
    so only the last assignment survives.  Left byte-identical.
    """
    # rows per generated Arrow record batch (upstream: batch_size)
    lowerCamelCase_ = 1_00_00
    # optional subset of columns to read (upstream: columns)
    lowerCamelCase_ = None
    # optional datasets.Features to cast to (upstream: features)
    lowerCamelCase_ = None
class __SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that streams Parquet files into Arrow tables.

    Fix: the original's four methods were all name-mangled to the single name
    ``lowerCamelCase_`` (each ``def`` shadowed the previous one), and their
    locals were collapsed to ``lowercase``, leaving undefined names.  The
    ``datasets.ArrowBasedBuilder`` protocol requires the override names
    restored below (``_info``/``_split_generators``/``_cast_table``/
    ``_generate_tables``), so those names are part of the fix.
    """

    # Config class used by the builder protocol (upstream: BUILDER_CONFIG_CLASS).
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self ):
        """Features may be None; they are then inferred from the first file's schema."""
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators(self , dl_manager ):
        """Handle str, list and dict ``data_files`` and build one split per key."""
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , '''rb''' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits

    def _cast_table(self , pa_table: pa.Table ):
        """Cast a raw table to the requested features, if any."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table

    def _generate_tables(self , files ):
        """Yield ``(key, table)`` pairs, one per Parquet record batch."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # The requested column subset must match the declared features exactly.
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'''{file_idx}_{batch_idx}''', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                    raise
| 92 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
# NOTE(review): the three constants below were all mangled to `UpperCamelCase_`;
# later code reads them as N_POPULATION, N_SELECTED and MUTATION_PROBABILITY.
UpperCamelCase_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]:
lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] )
return (item, float(__magic_name__ ))
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]:
lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 )
lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:]
lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _lowerCAmelCase ( child : str , genes : list[str] ) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of `child`.

    Fix: the original declared both parameters with one mangled name
    (SyntaxError) and dropped the indexed assignment, so the chosen gene was
    computed but never written into the child.
    """
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        # Pick a valid position (len - 1 keeps the index in bounds).
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def _lowerCAmelCase ( parent_1 : tuple[str, float] , population_score : list[tuple[str, float]] , genes : list[str] , ) -> list[str]:
    """Breed up to 10 mutated children from `parent_1` and random mates drawn
    from the top of `population_score`.

    Fix: the original declared all three parameters with one mangled name
    (SyntaxError) and lost the locals; restored from the surrounding logic.
    NOTE(review): `crossover` and `mutate` refer to the two helpers defined
    above, whose names were also mangled in this file — restore them together.
    """
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 , N_SELECTED )][0]
        child_1, child_2 = crossover(parent_1[0] , parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 , genes ) )
        pop.append(mutate(child_2 , genes ) )
    return pop
def _lowerCAmelCase ( target : str , genes : list[str] , debug : bool = True ) -> tuple[int, int, str]:
    """Evolve random strings toward `target`; return
    ``(generation, total_population, best_string)`` once a perfect match is found.

    Fix: the original declared all three parameters with one mangled name
    (SyntaxError) and referenced many never-bound locals; restored below.
    NOTE(review): `evaluate` and `select` refer to the helpers defined above,
    whose names were also mangled in this file — restore them together.
    """
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''''''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'''\nGeneration: {generation}'''
                f'''\nTotal Population:{total_population}'''
                f'''\nBest score: {population_score[0][1]}'''
                f'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
# Demo: evolve the sample sentence from the gene alphabet below.
# NOTE(review): name-mangled block — the target/genes are bound to
# `UpperCamelCase_` but the call reads `target_str`/`genes_list`, `basic` is
# not defined under that name in this file, and the unpacked results are read
# as `generation`/`population`/`target`.  Restore the upstream names to run.
if __name__ == "__main__":
    UpperCamelCase_ = (
        """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
    )
    UpperCamelCase_ = list(
        """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
        """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
    )
    UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list)
    print(
        f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
| 92 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCamelCase :int = logging.get_logger(__name__)
class UpperCAmelCase ( __snake_case ):
    """Image processor (resize / center-crop / rescale / normalize pipeline)
    plus a semantic-segmentation post-processing helper.

    NOTE(review): mangled block — the base class was rewritten to
    `__snake_case` (presumably BaseImageProcessor; confirm against upstream)
    and every local in `preprocess`/`post_process` is collapsed to `_a`, so
    names like `images`, `logits` and `semantic_segmentation` read below are
    never actually bound.  Code left byte-identical.
    """

    # Keys of the tensors this processor produces.
    a: Union[str, Any] = ["pixel_values"]

    def __init__( self: Tuple , __UpperCamelCase: bool = True , __UpperCamelCase: Optional[Dict[str, int]] = None , __UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase: bool = True , __UpperCamelCase: Dict[str, int] = None , __UpperCamelCase: bool = True , __UpperCamelCase: Union[int, float] = 1 / 255 , __UpperCamelCase: bool = True , __UpperCamelCase: Optional[Union[float, List[float]]] = None , __UpperCamelCase: Optional[Union[float, List[float]]] = None , **__UpperCamelCase: int , ):
        super().__init__(**__UpperCamelCase )
        # Defaults: shortest edge 256 for resize, 224x224 center crop,
        # ImageNet mean/std for normalization.
        _a = size if size is not None else {'''shortest_edge''': 256}
        _a = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
        _a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        _a = get_size_dict(__UpperCamelCase , param_name='''crop_size''' )
        _a = do_resize
        _a = size
        _a = resample
        _a = do_center_crop
        _a = crop_size
        _a = do_rescale
        _a = rescale_factor
        _a = do_normalize
        _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _a = image_std if image_std is not None else IMAGENET_STANDARD_STD

    # Resize so the shortest edge matches size["shortest_edge"].
    def _A ( self: Union[str, Any] , __UpperCamelCase: np.ndarray , __UpperCamelCase: Dict[str, int] , __UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase: int , ):
        _a = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        _a = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
        return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )

    # Center-crop to size["height"] x size["width"].
    def _A ( self: List[Any] , __UpperCamelCase: np.ndarray , __UpperCamelCase: Dict[str, int] , __UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase: List[Any] , ):
        _a = get_size_dict(__UpperCamelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )

    # Multiply pixel values by `scale` (typically 1/255).
    def _A ( self: Dict , __UpperCamelCase: np.ndarray , __UpperCamelCase: float , __UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase: List[Any] ):
        return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )

    # Normalize with per-channel mean/std.
    def _A ( self: Dict , __UpperCamelCase: np.ndarray , __UpperCamelCase: Union[float, List[float]] , __UpperCamelCase: Union[float, List[float]] , __UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase: str , ):
        return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )

    # Full preprocessing pipeline: validate args, then resize -> crop ->
    # rescale -> normalize -> channel-format, returning a BatchFeature.
    def _A ( self: Optional[Any] , __UpperCamelCase: ImageInput , __UpperCamelCase: Optional[bool] = None , __UpperCamelCase: Dict[str, int] = None , __UpperCamelCase: PILImageResampling = None , __UpperCamelCase: bool = None , __UpperCamelCase: Dict[str, int] = None , __UpperCamelCase: Optional[bool] = None , __UpperCamelCase: Optional[float] = None , __UpperCamelCase: Optional[bool] = None , __UpperCamelCase: Optional[Union[float, List[float]]] = None , __UpperCamelCase: Optional[Union[float, List[float]]] = None , __UpperCamelCase: Optional[Union[str, TensorType]] = None , __UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase: List[str] , ):
        _a = do_resize if do_resize is not None else self.do_resize
        _a = size if size is not None else self.size
        _a = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
        _a = resample if resample is not None else self.resample
        _a = do_center_crop if do_center_crop is not None else self.do_center_crop
        _a = crop_size if crop_size is not None else self.crop_size
        _a = get_size_dict(__UpperCamelCase , param_name='''crop_size''' )
        _a = do_rescale if do_rescale is not None else self.do_rescale
        _a = rescale_factor if rescale_factor is not None else self.rescale_factor
        _a = do_normalize if do_normalize is not None else self.do_normalize
        _a = image_mean if image_mean is not None else self.image_mean
        _a = image_std if image_std is not None else self.image_std
        _a = make_list_of_images(__UpperCamelCase )
        if not valid_images(__UpperCamelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        _a = [to_numpy_array(__UpperCamelCase ) for image in images]
        if do_resize:
            _a = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
        if do_center_crop:
            _a = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
        if do_rescale:
            _a = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
        if do_normalize:
            _a = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
        _a = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
        _a = {'''pixel_values''': images}
        return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )

    # Turn model logits into per-image segmentation maps, optionally resized
    # (bilinear) to the given target sizes before the argmax.
    def _A ( self: Optional[Any] , __UpperCamelCase: int , __UpperCamelCase: List[Tuple] = None ):
        _a = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(__UpperCamelCase ) != len(__UpperCamelCase ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(__UpperCamelCase ):
                _a = target_sizes.numpy()
            _a = []
            for idx in range(len(__UpperCamelCase ) ):
                _a = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__UpperCamelCase )
                _a = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(__UpperCamelCase )
        else:
            _a = logits.argmax(dim=1 )
            _a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 346 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __snake_case ( _UpperCamelCase ) -> int:
    """Möbius function μ(n): 0 if n has a squared prime factor, otherwise
    ``(-1)**k`` where k is the number of prime factors.

    Fix: square-freeness must be checked on the factor *list* returned by
    ``prime_factors`` — the original passed the integer itself to
    ``is_square_free``.
    """
    factors = prime_factors(_UpperCamelCase )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 346 | 1 |
import re
from filelock import FileLock
# Optional nltk dependency: record availability and download the sentence
# tokenizer data under a file lock (safe for concurrent workers).
# NOTE(review): the flag is assigned to the mangled name `UpperCamelCase_` but
# read below as `NLTK_AVAILABLE` — restore the upstream name to run.
try:
    import nltk
    UpperCamelCase_ = True
except (ImportError, ModuleNotFoundError):
    UpperCamelCase_ = False
if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> str:
    """Strip pegasus `<n>` newline markers from *snake_case__* and return the
    text re-split into one sentence per line (needed for ROUGE-Lsum scoring).

    Fixes: the original body referenced a name that was not the parameter
    (NameError) and discarded `re.sub`'s return value, so the marker was never
    actually removed.

    Raises:
        AssertionError: if nltk is not installed.
    """
    snake_case__ = re.sub('''<n>''' , '''''' , snake_case__ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(snake_case__ ) )
| 132 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ : Any = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def UpperCamelCase ( pkg: str , hint=None ) -> None:
    """Check at call time that the version of *pkg* pinned in `deps` is satisfied.

    Fixes: the original declared the same name for both parameters (a
    SyntaxError) and indexed `deps` with a leaked loop variable; the first
    parameter is restored as the package key and the second as the optional
    hint forwarded to `require_version`.
    """
    require_version(deps[pkg] , hint )
| 238 | 0 |
def __UpperCAmelCase( input_string , key ):
    """Encrypt *input_string* with a rail-fence (zigzag) cipher of height *key*.

    Fixes: the original declared the same name for both parameters (a
    SyntaxError) and wrote every local to one garbled name while reading the
    real names (`lowest`, `num`, ...); the zigzag algorithm is restored.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError('''Height of grid can\'t be 0 or negative''' )
    # A height of 1, or a string no longer than the height, is its own ciphertext.
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["".join(row ) for row in temp_grid]
    output_string = "".join(grid )
    return output_string
def __UpperCAmelCase( input_string , key ):
    """Decrypt a rail-fence (zigzag) ciphertext *input_string* of height *key*.

    Fixes: same garbling as the encrypter — duplicate parameter name plus
    write/read name mismatches — restored from the zigzag algorithm itself:
    first mark each rail's length with a star template, then slice the
    ciphertext into rails, then read the rails back in zigzag order.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('''Height of grid can\'t be 0 or negative''' )
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append('''*''' )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = ''  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def __UpperCAmelCase( lowercase_ ):
    """Brute-force a rail-fence ciphertext: decrypt with every key from 1 to
    ``len(lowercase_) - 1`` and return a ``{key_guess: candidate}`` dict.

    Fixes: the original overwrote a throwaway local on each iteration and then
    returned the undefined name `results`; the dict is now actually populated.
    """
    results = {}
    for key_guess in range(1 , len(lowercase_ ) ):  # tries every key
        # NOTE(review): `decrypt` must be bound at module level; in this file the
        # decrypter above is defined under the name `__UpperCAmelCase` — confirm.
        results[key_guess] = decrypt(lowercase_ , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __UpperCAmelCase( lowercase_ ):
    # Factory for the `env` subcommand; the argument (parsed CLI args) is unused.
    # NOTE(review): `EnvironmentCommand` is not defined under that name in this
    # file (the command class below is named `__A`) — confirm the binding.
    return EnvironmentCommand()
def __UpperCAmelCase( lowercase_ ):
    """Factory for the `env` subcommand that forwards the parsed namespace's
    `accelerate_config_file` attribute.

    Fix: the body read the undefined name `args` instead of the parameter.
    NOTE(review): `EnvironmentCommand` is not defined under that name in this
    file (the command class below is named `__A`) — confirm the binding.
    """
    return EnvironmentCommand(lowercase_.accelerate_config_file )
class __A ( lowerCamelCase__ ):
    """CLI command that collects and prints environment/version information.

    NOTE(review): this block is machine-garbled and cannot run as written —
    the two `__snake_case` instance methods and the static `__snake_case` at
    the bottom shadow one another; `register_subcommand` assigns the sub-parser
    to a throwaway local and then uses the undefined `download_parser`;
    `__init__` reads the undefined `accelerate_config_file` instead of its
    parameter; and the info dict reads names (`safetensors_version`,
    `pt_version`, ...) that were only ever written to the garbled local
    `_lowerCamelCase`. Code left byte-identical; issues flagged only.
    """
    @staticmethod
    def __snake_case ( a__):
        """Register the `env` sub-parser and its options on *a__* (the root parser)."""
        _lowerCamelCase : List[Any] = parser.add_parser('''env''')
        download_parser.set_defaults(func=a__)
        download_parser.add_argument(
            '''--accelerate-config_file''' , default=a__ , help='''The accelerate config file to use for the default values in the launching script.''' , )
        download_parser.set_defaults(func=a__)
    def __init__( self , a__ , *a__):
        """Remember the accelerate config file path given on the command line."""
        _lowerCamelCase : str = accelerate_config_file
    def __snake_case ( self):
        """Gather versions of installed frameworks and print a fill-in report."""
        # safetensors: present, present-but-ignored, or not installed
        _lowerCamelCase : Union[str, Any] = '''not installed'''
        if is_safetensors_available():
            import safetensors
            _lowerCamelCase : Optional[Any] = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''') is not None:
            import safetensors
            _lowerCamelCase : Optional[int] = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
        _lowerCamelCase : Union[str, Any] = '''not installed'''
        _lowerCamelCase : Any = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            _lowerCamelCase : Optional[int] = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(a__):
                _lowerCamelCase : Optional[int] = load_config_from_file(self._accelerate_config_file).to_dict()
            _lowerCamelCase : str = (
                '''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
                if isinstance(a__ , a__)
                else F"""\t{accelerate_config}"""
            )
        # PyTorch version and CUDA availability
        _lowerCamelCase : List[Any] = '''not installed'''
        _lowerCamelCase : Tuple = '''NA'''
        if is_torch_available():
            import torch
            _lowerCamelCase : int = torch.__version__
            _lowerCamelCase : List[str] = torch.cuda.is_available()
        # TensorFlow version and GPU availability
        _lowerCamelCase : str = '''not installed'''
        _lowerCamelCase : Union[str, Any] = '''NA'''
        if is_tf_available():
            import tensorflow as tf
            _lowerCamelCase : List[str] = tf.__version__
            try:
                # deprecated in v2.1
                _lowerCamelCase : Optional[int] = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                _lowerCamelCase : Optional[int] = bool(tf.config.list_physical_devices('''GPU'''))
        # Flax / JAX stack versions and backend platform
        _lowerCamelCase : str = '''not installed'''
        _lowerCamelCase : List[Any] = '''not installed'''
        _lowerCamelCase : List[Any] = '''not installed'''
        _lowerCamelCase : Optional[int] = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            _lowerCamelCase : Any = flax.__version__
            _lowerCamelCase : str = jax.__version__
            _lowerCamelCase : Any = jaxlib.__version__
            _lowerCamelCase : int = jax.lib.xla_bridge.get_backend().platform
        _lowerCamelCase : int = {
            '''`transformers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''Huggingface_hub version''': huggingface_hub.__version__,
            '''Safetensors version''': F"""{safetensors_version}""",
            '''Accelerate version''': F"""{accelerate_version}""",
            '''Accelerate config''': F"""{accelerate_config_str}""",
            '''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
            '''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""",
            '''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""",
            '''Jax version''': F"""{jax_version}""",
            '''JaxLib version''': F"""{jaxlib_version}""",
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''')
        print(self.format_dict(a__))
        return info
    @staticmethod
    def __snake_case ( a__):
        """Render a dict as a newline-separated `- key: value` bullet list."""
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 613 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCamelCase = logging.get_logger(__name__)
# NOTE(review): the pretrained-config archive map below is assigned to the SAME
# name as the logger above, clobbering it; the config class later calls
# `logger.info`, which would raise NameError. Flagged only.
__UpperCamelCase = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _A ( _UpperCAmelCase ):
    """Configuration class for Deformable DETR models.

    NOTE(review): this block is machine-garbled and cannot run as written —
    `__init__` repeats the parameter name `__magic_name__` for every argument
    (a SyntaxError in Python), while the body reads the real upstream names
    (`use_timm_backbone`, `backbone_config`, ...) that are never bound; several
    `a__` references (the `isinstance` check, `from_dict`, the `super().__init__`
    call) are likewise unresolved; and the three `lowercase__` methods at the
    bottom shadow one another. Code left byte-identical; issues flagged only.
    """
    # model identifier used by AutoConfig
    lowercase__: str = 'deformable_detr'
    # canonical-name aliases expected by the base config
    lowercase__: Dict = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self : Optional[int] , __magic_name__ : Optional[Any]=True , __magic_name__ : Tuple=None , __magic_name__ : List[Any]=3 , __magic_name__ : List[Any]=3_00 , __magic_name__ : Optional[Any]=10_24 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=10_24 , __magic_name__ : Any=8 , __magic_name__ : Optional[int]=6 , __magic_name__ : str=10_24 , __magic_name__ : List[str]=8 , __magic_name__ : int=0.0 , __magic_name__ : List[str]=True , __magic_name__ : Optional[int]="relu" , __magic_name__ : Optional[Any]=2_56 , __magic_name__ : int=0.1 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : str=0.0 , __magic_name__ : List[Any]=0.02 , __magic_name__ : List[Any]=1.0 , __magic_name__ : Dict=True , __magic_name__ : Any=False , __magic_name__ : List[Any]="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[int]=True , __magic_name__ : Tuple=False , __magic_name__ : List[Any]=4 , __magic_name__ : str=4 , __magic_name__ : Optional[Any]=4 , __magic_name__ : int=False , __magic_name__ : List[str]=3_00 , __magic_name__ : Union[str, Any]=False , __magic_name__ : Optional[Any]=1 , __magic_name__ : str=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Optional[Any]=1 , __magic_name__ : str=1 , __magic_name__ : List[str]=5 , __magic_name__ : Dict=2 , __magic_name__ : Dict=0.1 , __magic_name__ : List[Any]=0.25 , __magic_name__ : Optional[int]=False , **__magic_name__ : int , ) -> List[Any]:
        """Validate backbone options and store all model hyper-parameters."""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can\'t specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                __snake_case : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(a__ , a__ ):
                __snake_case : Tuple = backbone_config.get("""model_type""" )
                __snake_case : int = CONFIG_MAPPING[backbone_model_type]
                __snake_case : Optional[Any] = config_class.from_dict(a__ )
        __snake_case : List[str] = use_timm_backbone
        __snake_case : str = backbone_config
        __snake_case : int = num_channels
        __snake_case : Tuple = num_queries
        __snake_case : Tuple = max_position_embeddings
        __snake_case : Any = d_model
        __snake_case : Optional[int] = encoder_ffn_dim
        __snake_case : Union[str, Any] = encoder_layers
        __snake_case : List[Any] = encoder_attention_heads
        __snake_case : Tuple = decoder_ffn_dim
        __snake_case : Tuple = decoder_layers
        __snake_case : Optional[int] = decoder_attention_heads
        __snake_case : Tuple = dropout
        __snake_case : int = attention_dropout
        __snake_case : Dict = activation_dropout
        __snake_case : Tuple = activation_function
        __snake_case : str = init_std
        __snake_case : Tuple = init_xavier_std
        __snake_case : List[str] = encoder_layerdrop
        __snake_case : Any = auxiliary_loss
        __snake_case : List[Any] = position_embedding_type
        __snake_case : Any = backbone
        __snake_case : int = use_pretrained_backbone
        __snake_case : Tuple = dilation
        # deformable attributes
        __snake_case : Dict = num_feature_levels
        __snake_case : Dict = encoder_n_points
        __snake_case : Tuple = decoder_n_points
        __snake_case : Union[str, Any] = two_stage
        __snake_case : List[str] = two_stage_num_proposals
        __snake_case : List[Any] = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
        # Hungarian matcher
        __snake_case : Union[str, Any] = class_cost
        __snake_case : Dict = bbox_cost
        __snake_case : List[str] = giou_cost
        # Loss coefficients
        __snake_case : Tuple = mask_loss_coefficient
        __snake_case : Union[str, Any] = dice_loss_coefficient
        __snake_case : Optional[Any] = bbox_loss_coefficient
        __snake_case : List[Any] = giou_loss_coefficient
        __snake_case : Optional[Any] = eos_coefficient
        __snake_case : Dict = focal_alpha
        __snake_case : Any = disable_custom_kernels
        super().__init__(is_encoder_decoder=a__ , **a__ )
    @property
    def lowercase__ ( self : int ) -> Any:
        """Alias for the number of encoder attention heads."""
        return self.encoder_attention_heads
    @property
    def lowercase__ ( self : List[Any] ) -> Optional[int]:
        """Alias for the model (hidden) dimension."""
        return self.d_model
    def lowercase__ ( self : Any ) -> List[Any]:
        """Serialize this configuration (and any nested backbone config) to a dict."""
        __snake_case : List[Any] = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            __snake_case : List[Any] = self.backbone_config.to_dict()
        __snake_case : Tuple = self.__class__.model_type
        return output
| 26 |
def lowerCamelCase__ ( separator: str , separated: list[str] ) -> str:
    """Join *separated* with *separator*, stripping any leading/trailing
    separator characters from the result.

    Fixes: the original declared the same name for both parameters — a
    SyntaxError; the first is restored as the separator and the second as the
    list of strings to join, matching the body's reads.

    Raises:
        Exception: if any element of *separated* is not a string.
    """
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    # strip() removes separator chars from both ends, so "a_b_" becomes "a_b"
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 592 | 0 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class __SCREAMING_SNAKE_CASE :
    '''A binary-search-tree node: a value plus parent/left/right links.

    Fixes: the original wrote every attribute assignment to a throwaway local
    (and read the undefined name `value`), so instances were never initialized;
    the assignments are restored to real instance attributes.
    '''
    def __init__( self , snake_case_ = None ):
        '''Create a detached node holding *snake_case_*; all links start as None.'''
        self.value = snake_case_
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None
    def __repr__( self ):
        '''Leaves render as their bare value; inner nodes as a pretty-printed dict.'''
        from pprint import pformat
        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class __SCREAMING_SNAKE_CASE :
    '''Binary search tree over comparable values.

    NOTE(review): this block is machine-garbled and cannot run as written —
    every method is named `_UpperCamelCase` (later definitions shadow earlier
    ones), several signatures repeat a parameter name (a SyntaxError), locals
    are written to the throwaway `UpperCAmelCase_` while later lines read the
    real names (`node`, `parent_node`, ...), and the class name collides with
    the Node class above, so `Node(...)` is unresolvable. Code left
    byte-identical; issues flagged only.
    '''
    def __init__( self , snake_case_ = None ):
        '''Create a tree, optionally rooted at an existing node.'''
        UpperCAmelCase_ : Any = root
    def __str__( self ):
        '''String form delegates to the root node's repr.'''
        return str(self.root )
    def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
        '''Splice *new_children* into the place currently occupied by *node*.'''
        if new_children is not None: # reset its kids
            UpperCAmelCase_ : List[str] = node.parent
        if node.parent is not None: # reset its parent
            if self.is_right(snake_case_ ): # If it is the right children
                UpperCAmelCase_ : List[Any] = new_children
            else:
                UpperCAmelCase_ : int = new_children
        else:
            UpperCAmelCase_ : str = new_children
    def _UpperCamelCase ( self , snake_case_ ):
        '''Return True when *node* is its parent's right child.'''
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def _UpperCamelCase ( self ):
        '''Return True when the tree has no nodes.'''
        return self.root is None
    def _UpperCamelCase ( self , snake_case_ ):
        '''Insert a single value, descending left/right by comparison.'''
        UpperCAmelCase_ : Any = Node(snake_case_ ) # create a new Node
        if self.empty(): # if Tree is empty
            UpperCAmelCase_ : Tuple = new_node # set its root
        else: # Tree is not empty
            UpperCAmelCase_ : Tuple = self.root # from root
            if parent_node is None:
                return
            while True: # While we don't get to a leaf
                if value < parent_node.value: # We go left
                    if parent_node.left is None:
                        UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf
                        break
                    else:
                        UpperCAmelCase_ : str = parent_node.left
                else:
                    if parent_node.right is None:
                        UpperCAmelCase_ : Tuple = new_node
                        break
                    else:
                        UpperCAmelCase_ : Any = parent_node.right
            UpperCAmelCase_ : Tuple = parent_node
    def _UpperCamelCase ( self , *snake_case_ ):
        '''Insert each of the given values in order.'''
        for value in values:
            self.__insert(snake_case_ )
    def _UpperCamelCase ( self , snake_case_ ):
        '''Find and return the node holding the value, or None if absent.'''
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.' )
        else:
            UpperCAmelCase_ : Optional[int] = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                UpperCAmelCase_ : Optional[Any] = node.left if value < node.value else node.right
            return node
    def _UpperCamelCase ( self , snake_case_ = None ):
        '''Return the maximum node of the subtree (rightmost descendant).'''
        if node is None:
            if self.root is None:
                return None
            UpperCAmelCase_ : Dict = self.root
        if not self.empty():
            while node.right is not None:
                UpperCAmelCase_ : Optional[Any] = node.right
        return node
    def _UpperCamelCase ( self , snake_case_ = None ):
        '''Return the minimum node of the subtree (leftmost descendant).'''
        if node is None:
            UpperCAmelCase_ : Tuple = self.root
            if self.root is None:
                return None
        if not self.empty():
            UpperCAmelCase_ : Optional[Any] = self.root
            while node.left is not None:
                UpperCAmelCase_ : Union[str, Any] = node.left
        return node
    def _UpperCamelCase ( self , snake_case_ ):
        '''Remove the node holding the value, reattaching children as needed.'''
        UpperCAmelCase_ : Optional[int] = self.search(snake_case_ ) # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None: # If it has no children
                self.__reassign_nodes(snake_case_ , snake_case_ )
            elif node.left is None: # Has only right children
                self.__reassign_nodes(snake_case_ , node.right )
            elif node.right is None: # Has only left children
                self.__reassign_nodes(snake_case_ , node.left )
            else:
                UpperCAmelCase_ : int = self.get_max(
                    node.left ) # Gets the max value of the left branch
                self.remove(tmp_node.value ) # type: ignore
                UpperCAmelCase_ : int = (
                    tmp_node.value # type: ignore
                ) # Assigns the value to the node to delete and keep tree structure
    def _UpperCamelCase ( self , snake_case_ ):
        '''Yield the subtree's nodes in pre-order (node, left, right).'''
        if node is not None:
            yield node # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def _UpperCamelCase ( self , snake_case_=None ):
        '''Traverse from the root with the given function, default pre-order.'''
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
        '''Append the subtree's values to *arr* in sorted (in-order) order.'''
        if node:
            self.inorder(snake_case_ , node.left )
            arr.append(node.value )
            self.inorder(snake_case_ , node.right )
    def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
        '''Return the k-th smallest value via an in-order traversal.'''
        UpperCAmelCase_ : list[int] = []
        self.inorder(snake_case_ , snake_case_ ) # append all values to list using inorder traversal
        return arr[k - 1]
def _lowerCamelCase ( lowerCamelCase_ ):
    """Return the nodes of the tree rooted at *lowerCamelCase_* in post-order
    (left subtree, right subtree, then the root); an empty tree yields [].

    Fixes: the original wrote results to a throwaway local while returning the
    undefined name `node_list`, and recursed via the undefined name
    `postorder`; the recursion now calls this function by its actual name.
    """
    node_list = []
    if lowerCamelCase_ is not None:
        node_list = _lowerCamelCase(lowerCamelCase_.left ) + _lowerCamelCase(lowerCamelCase_.right ) + [lowerCamelCase_]
    return node_list
def _lowerCamelCase ( ):
    """Interactive demo: build a BST, search, print extremes, then empty it.

    Fixes: the original wrote the test tuple and tree to throwaway locals and
    then used the undefined names `t` and `lowerCamelCase_` (NameErrors);
    locals are restored so the demo actually exercises the tree API.
    NOTE(review): `BinarySearchTree` is not defined under that name in this
    file (the tree class above is named `__SCREAMING_SNAKE_CASE`) — confirm.
    """
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print('The value 6 exists' )
    else:
        print('The value 6 doesn\'t exist' )
    if t.search(-1 ) is not None:
        print('The value -1 exists' )
    else:
        print('The value -1 doesn\'t exist' )
    if not t.empty():
        print('Max Value: ' , t.get_max().value ) # type: ignore
        print('Min Value: ' , t.get_min().value ) # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 389 | '''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
# NOTE(review): this test class is machine-garbled and cannot run as written —
# all methods share the name `_UpperCamelCase` (later defs shadow earlier ones)
# and locals are written to the throwaway `UpperCAmelCase_` while later lines
# read the real upstream names (`processor`, `image_inputs`, `masks`, ...).
# Code left byte-identical; issues flagged only.
@require_vision
@require_torchvision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''PyTorch-side tests for SamProcessor (save/load, preprocessing, masks).'''
    def _UpperCamelCase ( self ):
        '''setUp: save a fresh SamProcessor into a temp directory.'''
        UpperCAmelCase_ : int = tempfile.mkdtemp()
        UpperCAmelCase_ : Union[str, Any] = SamImageProcessor()
        UpperCAmelCase_ : Union[str, Any] = SamProcessor(snake_case_ )
        processor.save_pretrained(self.tmpdirname )
    def _UpperCamelCase ( self , **snake_case_ ):
        '''Reload the saved processor's image processor, with overrides.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor
    def _UpperCamelCase ( self ):
        '''tearDown: delete the temp directory.'''
        shutil.rmtree(self.tmpdirname )
    def _UpperCamelCase ( self ):
        '''Build a single random PIL image as test input.'''
        UpperCAmelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        UpperCAmelCase_ : List[Any] = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _UpperCamelCase ( self ):
        '''Round-trip save/load with extra kwargs preserves the image processor.'''
        UpperCAmelCase_ : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase_ : Optional[int] = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
        UpperCAmelCase_ : List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=snake_case_ , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , snake_case_ )
    def _UpperCamelCase ( self ):
        '''Processor output should match the bare image processor's output.'''
        UpperCAmelCase_ : Any = self.get_image_processor()
        UpperCAmelCase_ : str = SamProcessor(image_processor=snake_case_ )
        UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
        UpperCAmelCase_ : int = image_processor(snake_case_ , return_tensors='np' )
        UpperCAmelCase_ : Dict = processor(images=snake_case_ , return_tensors='np' )
        input_feat_extract.pop('original_sizes' )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' )  # pop original_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    @require_torch
    def _UpperCamelCase ( self ):
        '''post_process_masks resizes masks to target sizes (torch and numpy).'''
        UpperCAmelCase_ : Tuple = self.get_image_processor()
        UpperCAmelCase_ : Tuple = SamProcessor(image_processor=snake_case_ )
        UpperCAmelCase_ : Optional[Any] = [torch.ones((1, 3, 5, 5) )]
        UpperCAmelCase_ : Optional[int] = [[1_7_6_4, 2_6_4_6]]
        UpperCAmelCase_ : Union[str, Any] = [[6_8_3, 1_0_2_4]]
        UpperCAmelCase_ : List[Any] = processor.post_process_masks(snake_case_ , snake_case_ , snake_case_ )
        self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
        UpperCAmelCase_ : Dict = processor.post_process_masks(
            snake_case_ , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) )
        self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
        # should also work with np
        UpperCAmelCase_ : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
        UpperCAmelCase_ : Any = processor.post_process_masks(snake_case_ , np.array(snake_case_ ) , np.array(snake_case_ ) )
        self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
        UpperCAmelCase_ : Optional[int] = [[1, 0], [0, 1]]
        with self.assertRaises(snake_case_ ):
            UpperCAmelCase_ : str = processor.post_process_masks(snake_case_ , np.array(snake_case_ ) , np.array(snake_case_ ) )
# NOTE(review): same garbling as the class above — colliding `_UpperCamelCase`
# method names and throwaway locals read back under real names (`processor`,
# `image_inputs`, `masks`, ...). Code left byte-identical; issues flagged only.
@require_vision
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''TensorFlow-side tests for SamProcessor (save/load, preprocessing, masks).'''
    def _UpperCamelCase ( self ):
        '''setUp: save a fresh SamProcessor into a temp directory.'''
        UpperCAmelCase_ : List[Any] = tempfile.mkdtemp()
        UpperCAmelCase_ : Tuple = SamImageProcessor()
        UpperCAmelCase_ : List[str] = SamProcessor(snake_case_ )
        processor.save_pretrained(self.tmpdirname )
    def _UpperCamelCase ( self , **snake_case_ ):
        '''Reload the saved processor's image processor, with overrides.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor
    def _UpperCamelCase ( self ):
        '''tearDown: delete the temp directory.'''
        shutil.rmtree(self.tmpdirname )
    def _UpperCamelCase ( self ):
        '''Build a single random PIL image as test input.'''
        UpperCAmelCase_ : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        UpperCAmelCase_ : int = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _UpperCamelCase ( self ):
        '''Round-trip save/load with extra kwargs preserves the image processor.'''
        UpperCAmelCase_ : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase_ : List[Any] = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
        UpperCAmelCase_ : List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=snake_case_ , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , snake_case_ )
    def _UpperCamelCase ( self ):
        '''Processor output should match the bare image processor's output.'''
        UpperCAmelCase_ : Optional[int] = self.get_image_processor()
        UpperCAmelCase_ : List[str] = SamProcessor(image_processor=snake_case_ )
        UpperCAmelCase_ : List[str] = self.prepare_image_inputs()
        UpperCAmelCase_ : List[str] = image_processor(snake_case_ , return_tensors='np' )
        UpperCAmelCase_ : Optional[int] = processor(images=snake_case_ , return_tensors='np' )
        input_feat_extract.pop('original_sizes' )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    @require_tf
    def _UpperCamelCase ( self ):
        '''post_process_masks resizes masks to target sizes (tf and numpy).'''
        UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
        UpperCAmelCase_ : List[Any] = SamProcessor(image_processor=snake_case_ )
        UpperCAmelCase_ : Optional[int] = [tf.ones((1, 3, 5, 5) )]
        UpperCAmelCase_ : Optional[Any] = [[1_7_6_4, 2_6_4_6]]
        UpperCAmelCase_ : Optional[int] = [[6_8_3, 1_0_2_4]]
        UpperCAmelCase_ : Optional[int] = processor.post_process_masks(snake_case_ , snake_case_ , snake_case_ , return_tensors='tf' )
        self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
        UpperCAmelCase_ : Any = processor.post_process_masks(
            snake_case_ , tf.convert_to_tensor(snake_case_ ) , tf.convert_to_tensor(snake_case_ ) , return_tensors='tf' , )
        self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
        # should also work with np
        UpperCAmelCase_ : str = [np.ones((1, 3, 5, 5) )]
        UpperCAmelCase_ : Any = processor.post_process_masks(
            snake_case_ , np.array(snake_case_ ) , np.array(snake_case_ ) , return_tensors='tf' )
        self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
        UpperCAmelCase_ : Tuple = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            UpperCAmelCase_ : Tuple = processor.post_process_masks(
                snake_case_ , np.array(snake_case_ ) , np.array(snake_case_ ) , return_tensors='tf' )
# NOTE(review): same garbling as the classes above — colliding `_UpperCamelCase`
# method names and throwaway locals read back under real names. Code left
# byte-identical; issues flagged only.
@require_vision
@require_torchvision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Cross-framework (PT vs TF) equivalence tests for SamProcessor.'''
    def _UpperCamelCase ( self ):
        '''setUp: save a fresh SamProcessor into a temp directory.'''
        UpperCAmelCase_ : Optional[int] = tempfile.mkdtemp()
        UpperCAmelCase_ : List[Any] = SamImageProcessor()
        UpperCAmelCase_ : Any = SamProcessor(snake_case_ )
        processor.save_pretrained(self.tmpdirname )
    def _UpperCamelCase ( self , **snake_case_ ):
        '''Reload the saved processor's image processor, with overrides.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor
    def _UpperCamelCase ( self ):
        '''tearDown: delete the temp directory.'''
        shutil.rmtree(self.tmpdirname )
    def _UpperCamelCase ( self ):
        '''Build a single random PIL image as test input.'''
        UpperCAmelCase_ : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        UpperCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def _UpperCamelCase ( self ):
        '''post_process_masks must agree between the tf and pt code paths.'''
        UpperCAmelCase_ : Any = self.get_image_processor()
        UpperCAmelCase_ : Union[str, Any] = SamProcessor(image_processor=snake_case_ )
        UpperCAmelCase_ : Optional[int] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        UpperCAmelCase_ : Optional[Any] = [tf.convert_to_tensor(snake_case_ )]
        UpperCAmelCase_ : Tuple = [torch.tensor(snake_case_ )]
        UpperCAmelCase_ : Any = [[1_7_6_4, 2_6_4_6]]
        UpperCAmelCase_ : Tuple = [[6_8_3, 1_0_2_4]]
        UpperCAmelCase_ : str = processor.post_process_masks(
            snake_case_ , snake_case_ , snake_case_ , return_tensors='tf' )
        UpperCAmelCase_ : Union[str, Any] = processor.post_process_masks(
            snake_case_ , snake_case_ , snake_case_ , return_tensors='pt' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    @is_pt_tf_cross_test
    def _UpperCamelCase ( self ):
        '''Preprocessing pixel_values must agree between pt and tf tensors.'''
        UpperCAmelCase_ : Tuple = self.get_image_processor()
        UpperCAmelCase_ : Tuple = SamProcessor(image_processor=snake_case_ )
        UpperCAmelCase_ : Optional[Any] = self.prepare_image_inputs()
        UpperCAmelCase_ : Optional[Any] = image_processor(snake_case_ , return_tensors='pt' )['pixel_values'].numpy()
        UpperCAmelCase_ : int = processor(images=snake_case_ , return_tensors='pt' )['pixel_values'].numpy()
        UpperCAmelCase_ : List[Any] = image_processor(snake_case_ , return_tensors='tf' )['pixel_values'].numpy()
        UpperCAmelCase_ : str = processor(images=snake_case_ , return_tensors='tf' )['pixel_values'].numpy()
        self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
        self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
        self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
| 389 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class _SCREAMING_SNAKE_CASE ( A ):
    """Abstract base class for generation stopping criteria; subclasses decide
    from (input_ids, scores) whether generation should stop.

    Fixes: `__call__` declared the same name for every parameter (a
    SyntaxError), and the decorator referenced the undefined `A_`; the
    docstring template constant defined above is used instead — TODO confirm.
    """
    @add_start_docstrings(SCREAMING_SNAKE_CASE__ )
    def __call__( self , input_ids , scores , **kwargs ):
        raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class _SCREAMING_SNAKE_CASE ( A ):
    """Stop generation once the sequence reaches `max_length` tokens, warning
    when `max_position_embeddings` would be exceeded first.

    Fixes: both `__init__` and `__call__` declared duplicate parameter names
    (SyntaxErrors), and attribute writes went to a throwaway local; real
    instance attributes are restored to match the body's `self.*` reads.
    """
    def __init__( self , max_length , max_position_embeddings = None ):
        self.max_length = max_length
        # model positional-embedding limit; only used for the friendly warning
        self.max_position_embeddings = max_position_embeddings
    @add_start_docstrings(SCREAMING_SNAKE_CASE__ )
    def __call__( self , input_ids , scores , **kwargs ):
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                """This is a friendly reminder - the current text generation call will exceed the model's predefined """
                F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                """exceptions, performance degradation, or nothing at all.""" )
        return is_done
class _SCREAMING_SNAKE_CASE ( A ):
    """Deprecated: stop once `start_length + max_new_tokens` tokens exist.

    Fixes: `__init__` and `__call__` declared duplicate parameter names
    (SyntaxErrors), attribute writes went to throwaway locals, and the
    deprecation warning passed the undefined `A_` as its category —
    `FutureWarning` restored (standard category for deprecations).
    """
    def __init__( self , start_length , max_new_tokens ):
        warnings.warn(
            """The class `MaxNewTokensCriteria` is deprecated. """
            F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            """with `max_length = start_length + max_new_tokens` instead.""" , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        # precomputed absolute stop length
        self.max_length = start_length + max_new_tokens
    @add_start_docstrings(SCREAMING_SNAKE_CASE__ )
    def __call__( self , input_ids , scores , **kwargs ):
        return input_ids.shape[-1] >= self.max_length
class _SCREAMING_SNAKE_CASE ( A ):
    """Stop generation once it has run for more than `max_time` seconds,
    counted from `initial_timestamp` (defaulting to construction time).

    Fixes: `__init__` and `__call__` declared duplicate parameter names
    (SyntaxErrors) and attribute writes went to a throwaway local; real
    instance attributes are restored to match the body's `self.*` reads.
    """
    def __init__( self , max_time , initial_timestamp = None ):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(SCREAMING_SNAKE_CASE__ )
    def __call__( self , input_ids , scores , **kwargs ):
        return time.time() - self.initial_timestamp > self.max_time
class _SCREAMING_SNAKE_CASE ( A ):
    """A collection of stopping criteria: generation stops when ANY fires.

    Fixes: `__call__` declared duplicate parameter names (a SyntaxError) and
    forwarded an undefined name to each criterion; the arguments are restored.
    """
    @add_start_docstrings(SCREAMING_SNAKE_CASE__ )
    def __call__( self , input_ids , scores , **kwargs ):
        return any(criteria(input_ids , scores ) for criteria in self )
    @property
    def __snake_case( self ):
        # NOTE(review): upstream checks isinstance against MaxLengthCriteria and
        # MaxNewTokensCriteria; in this file those classes are all defined under
        # the name `_SCREAMING_SNAKE_CASE`, so the original `isinstance(A_, A_)`
        # calls were unresolvable. Restored to the upstream names — confirm the
        # actual bindings in this module.
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None
def a__ ( stopping_criteria , max_length ):
    """Return a copy of ``stopping_criteria`` guaranteed to enforce a max length.

    Fixes obfuscation damage: both parameters were named ``snake_case__`` (a
    SyntaxError), and the second argument of ``warnings.warn`` was the integer
    ``max_length`` instead of a warning category (a TypeError at runtime).
    Restored names are grounded by the ``.max_length`` attribute access and the
    ``MaxLengthCriteria(max_length=...)`` call already present in the body.

    Args:
        stopping_criteria: a StoppingCriteriaList (exposes ``max_length``).
        max_length: the generation call's ``max_length`` parameter.

    Returns:
        A deep copy of ``stopping_criteria``; if it had no max-length criterion,
        a ``MaxLengthCriteria(max_length)`` is appended to the copy. If both are
        set but disagree, a UserWarning is emitted and the criteria's value wins.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        # upstream passes UserWarning here; the category was lost to obfuscation
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
| 643 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _SCREAMING_SNAKE_CASE :
    """Builds a tiny Blenderbot config and matching TF inputs for the tests below.

    NOTE(review): machine-obfuscated dump — every ``__init__`` parameter is
    named ``A_`` (a SyntaxError), locals are ``_UpperCAmelCase``, and the
    class attributes (upstream config_cls / config_updates / hidden_act) all
    share one name. The attribute names read on the right-hand sides below
    preserve the intended parameter names. Code left byte-identical.
    """
    __SCREAMING_SNAKE_CASE = BlenderbotConfig
    __SCREAMING_SNAKE_CASE = {}
    __SCREAMING_SNAKE_CASE = '''gelu'''
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=False , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_=0.1 , A_=0.1 , A_=20 , A_=2 , A_=1 , A_=0 , ):
        _UpperCAmelCase : List[str] = parent
        _UpperCAmelCase : Union[str, Any] = batch_size
        _UpperCAmelCase : Optional[Any] = seq_length
        _UpperCAmelCase : List[Any] = is_training
        _UpperCAmelCase : Union[str, Any] = use_labels
        _UpperCAmelCase : Tuple = vocab_size
        _UpperCAmelCase : Optional[int] = hidden_size
        _UpperCAmelCase : int = num_hidden_layers
        _UpperCAmelCase : List[Any] = num_attention_heads
        _UpperCAmelCase : List[Any] = intermediate_size
        _UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
        _UpperCAmelCase : str = attention_probs_dropout_prob
        _UpperCAmelCase : Optional[int] = max_position_embeddings
        _UpperCAmelCase : Optional[Any] = eos_token_id
        _UpperCAmelCase : Optional[Any] = pad_token_id
        _UpperCAmelCase : Tuple = bos_token_id
    # upstream: prepare_config_and_inputs_for_common (called at the bottom of the file)
    def __snake_case( self ):
        _UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        _UpperCAmelCase : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        _UpperCAmelCase : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        _UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _UpperCAmelCase : List[Any] = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        _UpperCAmelCase : Dict = prepare_blenderbot_inputs_dict(A_ , A_ , A_ )
        return config, inputs_dict
    # upstream: check_decoder_model_past_large_inputs — verifies cached (past_key_values)
    # decoding matches a full forward pass on a random slice of the logits.
    def __snake_case( self , A_ , A_ ):
        _UpperCAmelCase : Union[str, Any] = TFBlenderbotModel(config=A_ ).get_decoder()
        _UpperCAmelCase : List[Any] = inputs_dict["""input_ids"""]
        _UpperCAmelCase : Any = input_ids[:1, :]
        _UpperCAmelCase : List[str] = inputs_dict["""attention_mask"""][:1, :]
        _UpperCAmelCase : Tuple = inputs_dict["""head_mask"""]
        _UpperCAmelCase : Union[str, Any] = 1
        # first forward pass
        _UpperCAmelCase : List[str] = model(A_ , attention_mask=A_ , head_mask=A_ , use_cache=A_ )
        _UpperCAmelCase,_UpperCAmelCase : List[str] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _UpperCAmelCase : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        _UpperCAmelCase : Dict = tf.concat([input_ids, next_tokens] , axis=-1 )
        _UpperCAmelCase : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        _UpperCAmelCase : Tuple = model(A_ , attention_mask=A_ )[0]
        _UpperCAmelCase : int = model(A_ , attention_mask=A_ , past_key_values=A_ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        _UpperCAmelCase : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        _UpperCAmelCase : str = output_from_no_past[:, -3:, random_slice_idx]
        _UpperCAmelCase : str = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(A_ , A_ , rtol=1e-3 )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard encoder/decoder input dict, deriving any mask not given.

    Fixes obfuscation damage: the original ``def`` repeated the parameter name
    ``snake_case__`` eight times (a SyntaxError) and referenced the undefined
    dtype ``tf.inta``. Names are grounded by the body's attribute/key usage and
    by the call site in the model tester above, which calls this function as
    ``prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)``.

    Returns:
        dict with input_ids, decoder_input_ids and the five (possibly derived)
        masks, ready to be splatted into a TF Blenderbot model call.
    """
    if attention_mask is None:
        # 1 for real tokens, 0 for padding.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token; mask padding elsewhere.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _SCREAMING_SNAKE_CASE ( A , A , unittest.TestCase ):
    """Common TF model-test harness for Blenderbot (config tests + cached decoding).

    NOTE(review): obfuscated dump — the class attributes below all share the
    name ``__SCREAMING_SNAKE_CASE`` (later declarations overwrite earlier
    ones; upstream they are all_model_classes, all_generative_model_classes,
    pipeline_model_mapping, is_encoder_decoder, ...), the two mixin bases are
    both ``A``, and the reference to ``TFBlenderbotModelTester`` is unresolved
    because the tester class above lost that name. Left byte-identical.
    """
    __SCREAMING_SNAKE_CASE = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __SCREAMING_SNAKE_CASE = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    __SCREAMING_SNAKE_CASE = (
        {
            '''conversational''': TFBlenderbotForConditionalGeneration,
            '''feature-extraction''': TFBlenderbotModel,
            '''summarization''': TFBlenderbotForConditionalGeneration,
            '''text2text-generation''': TFBlenderbotForConditionalGeneration,
            '''translation''': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE = True
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False
    # upstream: setUp — build the model tester and the config tester
    def __snake_case( self ):
        _UpperCAmelCase : Union[str, Any] = TFBlenderbotModelTester(self )
        _UpperCAmelCase : Any = ConfigTester(self , config_class=A_ )
    # upstream: test_config
    def __snake_case( self ):
        self.config_tester.run_common_tests()
    # upstream: test_decoder_model_past_large_inputs
    def __snake_case( self ):
        _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*A_ )
@require_tokenizers
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: run facebook/blenderbot-400M-distill generation end to end.

    NOTE(review): obfuscated dump — the two class attributes (upstream
    ``src_text`` / ``model_name``) and the two cached properties (upstream
    ``tokenizer`` / ``model``) lost their names, so the ``self.src_text`` /
    ``self.model_name`` / ``self.tokenizer`` / ``self.model`` reads below are
    currently unresolved, as are ``model_inputs`` / ``generated_ids`` /
    ``generated_words``. Left byte-identical.
    """
    __SCREAMING_SNAKE_CASE = ['''My friends are cool but they eat too many carbs.''']
    __SCREAMING_SNAKE_CASE = '''facebook/blenderbot-400M-distill'''
    @cached_property
    def __snake_case( self ):
        return BlenderbotTokenizer.from_pretrained(self.model_name )
    @cached_property
    def __snake_case( self ):
        _UpperCAmelCase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def __snake_case( self ):
        # Tokenize, generate, decode, and compare against the reference reply.
        _UpperCAmelCase : Any = self.tokenizer(self.src_text , return_tensors="""tf""" )
        _UpperCAmelCase : List[Any] = self.model.generate(
            model_inputs.input_ids , )
        _UpperCAmelCase : List[str] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A_ )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 643 | 1 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __magic_name__ ( __a ):
    """Seq2seq-flavoured TrainingArguments: sortish sampling plus generation knobs.

    NOTE(review): obfuscated dump — all five fields are declared with the same
    name ``lowerCAmelCase`` (later declarations overwrite earlier ones;
    upstream they are sortish_sampler, predict_with_generate,
    generation_max_length, generation_num_beams, generation_config), defaults
    are the undefined name ``__a``, and the final line carries dataset residue
    fused with the next file's import. Left byte-identical.
    """
    lowerCAmelCase : bool = field(default=__a , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
    lowerCAmelCase : bool = field(
        default=__a , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    lowerCAmelCase : Optional[int] = field(
        default=__a , metadata={
            '''help''': (
                '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `max_length` value of the model configuration.'''
            )
        } , )
    lowerCAmelCase : Optional[int] = field(
        default=__a , metadata={
            '''help''': (
                '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `num_beams` value of the model configuration.'''
            )
        } , )
    lowerCAmelCase : Optional[Union[str, Path, GenerationConfig]] = field(
        default=__a , metadata={
            '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
        } , )
    # upstream: to_dict — serialize, converting any GenerationConfig value to a plain dict
    def lowerCAmelCase ( self : int ):
        """Serialize to a dict, converting any GenerationConfig field to a dict."""
        _UpperCamelCase: Optional[Any] = super().to_dict()
        for k, v in d.items():
            if isinstance(_lowercase , _lowercase ):
                _UpperCamelCase: str = v.to_dict()
        return d | 716 | import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of *vector* (i.e. ``v . v``).

    Renamed from the obfuscated ``lowerCAmelCase_``: the rbf kernel in the SVC
    class below calls ``norm_squared``, which was otherwise undefined.
    """
    return np.dot(vector, vector)
class __magic_name__ :
"""simple docstring"""
def __init__( self : int , *,
_lowercase : float = np.inf , _lowercase : str = "linear" , _lowercase : float = 0.0 , ):
"""simple docstring"""
_UpperCamelCase: int = regularization
_UpperCamelCase: Optional[int] = gamma
if kernel == "linear":
_UpperCamelCase: Optional[Any] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
_UpperCamelCase: Optional[Any] = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
_UpperCamelCase: Optional[int] = f"""Unknown kernel: {kernel}"""
raise ValueError(_lowercase )
def lowerCAmelCase ( self : Union[str, Any] , _lowercase : ndarray , _lowercase : ndarray ):
"""simple docstring"""
return np.dot(_lowercase , _lowercase )
def lowerCAmelCase ( self : str , _lowercase : ndarray , _lowercase : ndarray ):
"""simple docstring"""
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def lowerCAmelCase ( self : str , _lowercase : list[ndarray] , _lowercase : ndarray ):
"""simple docstring"""
_UpperCamelCase: List[str] = observations
_UpperCamelCase: Optional[int] = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((_UpperCamelCase) , ): Any = np.shape(_lowercase )
def to_minimize(_lowercase : ndarray ) -> float:
_UpperCamelCase: Optional[int] = 0
((_UpperCamelCase) , ): str = np.shape(_lowercase )
for i in range(_lowercase ):
for j in range(_lowercase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_lowercase )
_UpperCamelCase: Optional[Any] = LinearConstraint(_lowercase , 0 , 0 )
_UpperCamelCase: Optional[int] = Bounds(0 , self.regularization )
_UpperCamelCase: Dict = minimize(
_lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x
_UpperCamelCase: Union[str, Any] = l_star
# calculating mean offset of separation plane to points
_UpperCamelCase: List[str] = 0
for i in range(_lowercase ):
for j in range(_lowercase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
_UpperCamelCase: str = s / n
def lowerCAmelCase ( self : Optional[Any] , _lowercase : ndarray ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , _lowercase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    # Fixes a SyntaxError: dataset-export residue ("| 264 | 0 |") was fused
    # onto the final line.
    import doctest

    doctest.testmod()
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Return a ``k_size`` x ``k_size`` Gaussian kernel with spread ``sigma``.

    Renamed from the obfuscated ``lowerCAmelCase_``: ``gaussian_filter`` below
    calls ``gen_gaussian_kernel``. The duplicated parameter names (both
    ``_lowerCamelCase`` — a SyntaxError) are restored from the body, which
    already reads ``k_size`` and ``sigma``.
    """
    center = k_size // 2
    # Integer grid centred on the kernel middle, e.g. -1..1 for k_size=3.
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    # NOTE(review): normalisation is 1/(2*pi*sigma), not 1/(2*pi*sigma**2),
    # matching the upstream algorithm — kept as-is to preserve behaviour.
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian blur to a 2-D grayscale ``image`` (valid convolution).

    Renamed from the obfuscated ``lowerCAmelCase_``: the ``__main__`` block
    below calls ``gaussian_filter``. Fixes obfuscation damage: the ``def``
    repeated one parameter name three times (a SyntaxError), every local was
    the same identifier, and the im2col row write had lost its slice target.

    Returns:
        uint8 array of shape (height - k_size + 1, width - k_size + 1).
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image (uinta is this file's alias of uint8)
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uinta)
    return dst
if __name__ == "__main__":
    # Demo: blur Lena with two kernel sizes and show the results.
    # Fixes obfuscation damage: all four results were assigned to one name,
    # imshow referenced the undefined 'gaussianaxa', and dataset residue
    # ("| 578 |") was fused onto the final line.
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node carrying ``data`` coins.

    Restored from the obfuscated ``_UpperCamelCase``: the annotations in the
    function below reference ``TreeNode``, which was otherwise undefined, and
    all three fields had been collapsed to the single name ``_A`` (so only one
    field survived). Field names are grounded by the ``node.data`` /
    ``node.left`` / ``node.right`` reads in the function body.
    """

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
# Restored binding: the function body constructs CoinsDistribResult, but the
# obfuscated dump had bound the namedtuple to an unrelated name.
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def lowerCAmelCase_(root: TreeNode | None) -> int:
    """Return the minimum number of moves so every node ends with exactly one coin.

    This is "Distribute Coins in Binary Tree": each move transfers one coin
    between adjacent nodes. Fixes obfuscation damage: the tuple-unpack targets
    of the recursive calls had been collapsed to one identifier.

    Raises:
        ValueError: if the total number of coins differs from the node count.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            # An empty subtree "has" one coin already, so it needs nothing.
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        # Each child must end with exactly one coin; positive means it needs coins.
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_to_move = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_to_move, excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    # Fixes a SyntaxError: dataset-export residue ("| 578 | 1 |") was fused
    # onto the final line.
    import doctest

    doctest.testmod()
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
snake_case_ : Dict = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case_ ( unittest.TestCase ):
    """Round-trip tests against the staging hub: push a tiny Flax BERT and reload it.

    NOTE(review): obfuscated dump — every method shares the name
    ``__SCREAMING_SNAKE_CASE`` (upstream: setUpClass / tearDownClass /
    test_push_to_hub / test_push_to_hub_in_organization) and locals are
    ``lowerCamelCase_``, so names like ``model`` / ``new_model`` /
    ``base_params`` / ``new_params`` read below are currently unresolved.
    Code left byte-identical; only comments/docstrings were added.
    """
    @classmethod
    def __SCREAMING_SNAKE_CASE ( cls : Optional[int] ) -> Tuple:
        # Authenticate against the (staging) hub for the duration of the class.
        lowerCamelCase_ : Tuple = TOKEN
        HfFolder.save_token(__magic_name__ )
    @classmethod
    def __SCREAMING_SNAKE_CASE ( cls : Any ) -> List[str]:
        # Best-effort cleanup of repos the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id="test-model-flax" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
        except HTTPError:
            pass
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
        """Push a tiny model to a user repo, reload it, and compare parameters."""
        lowerCamelCase_ : Tuple = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        lowerCamelCase_ : int = FlaxBertModel(__magic_name__ )
        model.push_to_hub("test-model-flax" , use_auth_token=self._token )
        lowerCamelCase_ : Optional[Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
        lowerCamelCase_ : int = flatten_dict(unfreeze(model.params ) )
        lowerCamelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            lowerCamelCase_ : List[Any] = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(__magic_name__ , 1e-3 , msg=F"{key} not identical" )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-model-flax" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(__magic_name__ , repo_id="test-model-flax" , push_to_hub=__magic_name__ , use_auth_token=self._token )
        lowerCamelCase_ : Optional[Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
        lowerCamelCase_ : str = flatten_dict(unfreeze(model.params ) )
        lowerCamelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            lowerCamelCase_ : int = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(__magic_name__ , 1e-3 , msg=F"{key} not identical" )
    def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
        """Same round trip as above, but pushing into an organization repo."""
        lowerCamelCase_ : Dict = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        lowerCamelCase_ : int = FlaxBertModel(__magic_name__ )
        model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
        lowerCamelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
        lowerCamelCase_ : Any = flatten_dict(unfreeze(model.params ) )
        lowerCamelCase_ : Any = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            lowerCamelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(__magic_name__ , 1e-3 , msg=F"{key} not identical" )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                __magic_name__ , repo_id="valid_org/test-model-flax-org" , push_to_hub=__magic_name__ , use_auth_token=self._token )
        lowerCamelCase_ : Tuple = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
        lowerCamelCase_ : int = flatten_dict(unfreeze(model.params ) )
        lowerCamelCase_ : Any = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            lowerCamelCase_ : str = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(__magic_name__ , 1e-3 , msg=F"{key} not identical" )
def check_models_equal(model_1, model_2):
    """Return True when two Flax models have (near-)identical parameter trees.

    Restored from the obfuscated ``__a``: the test class below calls
    ``check_models_equal``. Fixes obfuscation damage: both parameters were
    named ``__UpperCAmelCase`` (a SyntaxError) and, as a consequence, both
    flattened parameter trees were read from the same model, making the check
    vacuously true.
    """
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        # Parameters count as equal when the summed absolute difference is tiny.
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class snake_case_ ( unittest.TestCase ):
    """Tests loading Flax models from a ``subfolder`` (plain and sharded checkpoints).

    NOTE(review): obfuscated dump — all four methods share the name
    ``__SCREAMING_SNAKE_CASE`` and locals are ``lowerCamelCase_``, so names
    like ``model`` / ``tmp_dir`` / ``subfolder`` / ``model_id`` and the
    ``__magic_name__`` call arguments below are currently unresolved. Code
    left byte-identical; only comments/docstrings were added.
    """
    def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
        """Save to a subfolder; loading the root must fail, loading with subfolder= must match."""
        lowerCamelCase_ : str = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        lowerCamelCase_ : Any = FlaxBertModel(__magic_name__ )
        lowerCamelCase_ : Any = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) )
            with self.assertRaises(__magic_name__ ):
                lowerCamelCase_ : str = FlaxBertModel.from_pretrained(__magic_name__ )
            lowerCamelCase_ : Tuple = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
        self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
    def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
        """Same as above, but with a sharded checkpoint (max_shard_size='10KB')."""
        lowerCamelCase_ : int = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        lowerCamelCase_ : Optional[int] = FlaxBertModel(__magic_name__ )
        lowerCamelCase_ : List[str] = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) , max_shard_size="10KB" )
            with self.assertRaises(__magic_name__ ):
                lowerCamelCase_ : List[Any] = FlaxBertModel.from_pretrained(__magic_name__ )
            lowerCamelCase_ : int = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
        self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
    def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
        """Hub repo with a subfolder: root load fails, subfolder load succeeds."""
        lowerCamelCase_ : Optional[int] = "bert"
        lowerCamelCase_ : Dict = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(__magic_name__ ):
            lowerCamelCase_ : Any = FlaxBertModel.from_pretrained(__magic_name__ )
        lowerCamelCase_ : Dict = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
        self.assertIsNotNone(__magic_name__ )
    def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
        """Hub repo with a sharded checkpoint in a subfolder: same expectations."""
        lowerCamelCase_ : Dict = "bert"
        lowerCamelCase_ : Any = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(__magic_name__ ):
            lowerCamelCase_ : Tuple = FlaxBertModel.from_pretrained(__magic_name__ )
        lowerCamelCase_ : Tuple = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
        self.assertIsNotNone(__magic_name__ )
| 253 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reorder a 32-char string of four 8-char words into little-endian word order.

    Renamed from the obfuscated ``__a``: ``preprocess`` and ``get_block_words``
    call ``to_little_endian``, which was otherwise undefined (every def in this
    MD5 module had been collapsed to ``__a``, each shadowing the previous).

    Raises:
        ValueError: if the input is not exactly 32 characters long.
    """
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    # Emit the four 8-char words in reverse order.
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Return the little-endian 8-hex-digit representation of ``i`` (mod 2**32).

    Renamed from the obfuscated ``__a``: the MD5 driver below calls
    ``reformat_hex`` to build the final digest.

    Raises:
        ValueError: if ``i`` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    # Keep the low 32 bits as eight hex digits.
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    # Emit the four hex-digit pairs (bytes) in reverse order.
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Return the MD5-padded bit string for ``message``.

    Renamed from the obfuscated ``__a``: the MD5 driver below calls
    ``preprocess``. Fixes obfuscation damage: the loop appended
    ``format(__UpperCAmelCase, "08b")`` — the (unbindable) parameter — instead
    of the current byte ``char``.

    The result is the message as ASCII '0'/'1' characters, followed by a '1'
    bit, zero-padded to 448 mod 512, then the little-endian 64-bit bit length.
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-char block of ``bit_string`` as sixteen little-endian ints.

    Renamed from the obfuscated ``__a``: the MD5 driver below calls
    ``get_block_words``.

    Raises:
        ValueError: if the input length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        # Split the block into sixteen 32-bit words, little-endian per word.
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Return the bitwise NOT of ``i`` treated as an unsigned 32-bit value.

    Renamed from the obfuscated ``__a``: the MD5 round function below calls
    ``not_aa`` (the ``aa`` suffix is this dump's mangling of ``32``).

    Raises:
        ValueError: if ``i`` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    # Flip every bit of the 32-char binary representation.
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Return ``(a + b) mod 2**32`` — 32-bit wrapping addition.

    Renamed from the obfuscated ``__a`` (both parameters shared one name, a
    SyntaxError): the MD5 driver below calls ``sum_aa``.
    """
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value ``i`` left by ``shift`` bits.

    Renamed from the obfuscated ``__a`` (both parameters shared one name, a
    SyntaxError): the MD5 driver below calls ``left_rotate_aa``.

    Raises:
        ValueError: if either argument is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    # XOR works as OR here because the shifted halves occupy disjoint bits.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __a(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as 32 hex-digit bytes.

    Rewritten from the obfuscated dump in which every local shared one
    identifier (``lowerCamelCase_``), collapsing the four round-state
    registers a/b/c/d into a single variable and destroying the algorithm.
    The structure (round functions, sine-derived constants, shift table,
    register rotation) is restored 1:1 from the control flow still visible in
    the dump; it matches the standard MD5 definition (RFC 1321).
    """
    bit_string = preprocess(message)

    # Per-round additive constants: floor(2**32 * |sin(i + 1)|).
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67_452_301
    b0 = 0xEF_CDA_B89
    c0 = 0x98_BAD_CFE
    d0 = 0x10_325_476

    # Per-round left-rotation amounts: four groups of four, each repeated
    # four times (identical to the original 64-entry literal table).
    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            # Rotate the registers: (a, b, c, d) <- (d, b + rotl(f), b, c).
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest
    doctest.testmod()
| 253 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class _UpperCamelCase ( __A ):
    """Configuration class for DeBERTa-v2 models.

    Restored from an obfuscated dump in which all nineteen ``__init__``
    parameters were named ``a`` (a SyntaxError). Parameter names and their
    defaults are grounded by the attribute assignments in the body (which are
    in the same order as the original default list) and by upstream
    ``DebertaV2Config``. The base ``__A`` is presumably ``PretrainedConfig``.
    """

    # restored: PretrainedConfig subclasses declare their model family here
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=12_8100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: pos_att_type may be a "|"-separated string.
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Pooler falls back to the model hidden size when not given explicitly.
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class _UpperCamelCase ( __A ):
    """ONNX export configuration for DeBERTa-v2 (dynamic axes, opset, dummy inputs).

    NOTE(review): obfuscated dump — both properties and the method share the
    name ``__UpperCamelCase`` (upstream: inputs / default_onnx_opset /
    generate_dummy_inputs), locals are ``SCREAMING_SNAKE_CASE`` (so
    ``dynamic_axis`` / ``dummy_inputs`` reads are unresolved), the method
    repeats the parameter name ``a`` (a SyntaxError), and dataset residue
    (``| 25 | import argparse``) is fused onto the final line. Left
    byte-identical.
    """
    @property
    def __UpperCamelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping per input; token_type_ids only when the model uses them."""
        if self.task == "multiple-choice":
            SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            SCREAMING_SNAKE_CASE : Dict = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
    @property
    def __UpperCamelCase ( self : Optional[int] ) -> int:
        """Minimum ONNX opset required for the export."""
        return 12
    def __UpperCamelCase ( self : Optional[Any] , a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a : int = -1 , a : int = -1 , a : int = -1 , a : bool = False , a : Optional["TensorType"] = None , a : int = 3 , a : int = 40 , a : int = 40 , a : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
        """Build dummy inputs, dropping token_type_ids when the config doesn't use them."""
        SCREAMING_SNAKE_CASE : int = super().generate_dummy_inputs(preprocessor=a , framework=a )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs | 25 | import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into `transformers` weights/config/vocab files.

    The original obfuscated version duplicated the parameter name ``lowercase``
    (a SyntaxError) and collapsed all locals into one name; they are restored here.
    """
    # Load on CPU so the conversion needs no GPU.
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository,
    # so prefix everything but the prediction layer with "transformer.".
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Tensors/arrays are not JSON-serializable; keep only plain config values.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE continuation markers "@@" are stripped; other tokens (past the 14
    # special entries) get a word-end suffix "</w>".
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    # BUG FIX: the original message printed the *config* path here.
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # BUG FIX: the converter defined above is named `UpperCAmelCase` in this file;
    # the original called an undefined `convert_xlm_checkpoint_to_pytorch`.
    UpperCAmelCase(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
def lowerCAmelCase(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1 (Project Euler 97).

    :param n: number of trailing digits to return (non-negative int).
    :raises ValueError: if ``n`` is not an int or is negative.
    """
    # The original body referenced an undefined name `n` while the parameter
    # was obfuscated to `a__`; the parameter name is restored.
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    # Three-argument pow performs the modular exponentiation efficiently.
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # BUG FIX: the original printed `solution(10)`, a name that does not exist
    # in this file; the function above is `lowerCAmelCase`.
    print(f"{lowerCAmelCase(10) = }")
"""Lazy import structure for the Speech-Encoder-Decoder model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Maps submodule name -> public names it exports; filled in below depending on
# which backends are installed. The original obfuscation collapsed this dict
# and its updates into one rebound variable, leaving `_import_structure`
# undefined at the bottom of the file.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# The training CLI needs at least one deep-learning backend at import time.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters.
# NOTE(review): names restored from the upstream CLI; nothing in this chunk
# reads them — confirm against the rest of the file.
USE_XLA = False
USE_AMP = False
def _A(args):
    """Factory for argparse's ``set_defaults(func=...)``: build the train command.

    :param args: parsed CLI ``argparse.Namespace``.
    """
    # BUG FIX: the command class in this file is `__UpperCamelCase`; the
    # original returned an undefined `TrainCommand`.
    return __UpperCamelCase(args)
class __UpperCamelCase(BaseTransformersCLICommand):
    """CLI ``train`` command: fine-tune a pipeline on a tab-separated CSV dataset.

    The original obfuscated version named all three run methods identically and
    assigned configuration to throwaway locals instead of ``self``; conventional
    names are restored from the ``run``/``framework`` dispatch below.
    """

    @staticmethod
    def register_subcommand(parser):
        """Register the ``train`` subcommand and its arguments on *parser*."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        # BUG FIX: point argparse at the factory `_A` defined above; the
        # original bound `func` to the parser parameter itself.
        train_parser.set_defaults(func=_A)

    def __init__(self, args):
        """Load the pipeline and datasets described by the parsed CLI *args*."""
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        """PyTorch training is not implemented for this command."""
        raise NotImplementedError

    def run_tf(self):
        """Train via ``pipeline.fit`` and save the trained pipeline."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> public names it exports; extended below based on the
# installed backends. The original obfuscation rebound one variable repeatedly
# instead of updating this dict, leaving `_import_structure` undefined.
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# The obfuscated original bound every one of these constants to the same name
# `_A`; the conventional names referenced by the tokenizer class below
# (VOCAB_FILES_NAMES, SPIECE_UNDERLINE, logger, ...) are restored.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class __UpperCAmelCase(PreTrainedTokenizer):
    """ALBERT tokenizer backed by a SentencePiece model (``spiece.model``).

    The original obfuscated version named almost every method ``A`` (so later
    definitions clobbered earlier ones); the PreTrainedTokenizer hook names are
    restored from the base-class contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        """Size of the SentencePiece vocabulary (without added tokens)."""
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; it is reloaded from
        # `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs: str) -> str:
        """Normalize whitespace/quotes, optionally strip accents and lower-case."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            # NFKD splits accented characters so combining marks can be dropped.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, re-splitting digit+comma pieces like ``9,``."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to a token (str)."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens) -> str:
        """Join tokens into a string, decoding around special tokens."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                # BUG FIX: decode the accumulated sub-tokens, not the full
                # `tokens` argument as the obfuscated original did.
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Format: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# Root of the repository; the script is meant to be run from there.
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model).
# The obfuscated original bound both constants to the same name `_A`, leaving
# `INTERNAL_OPS` (used by the checker below) undefined.
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]
def lowercase(saved_model_path, strict, opset):
    """Check that every op used by a TF SavedModel is ONNX-exportable at *opset*.

    :param saved_model_path: path to the ``.pb`` SavedModel file.
    :param strict: raise on incompatible ops instead of just printing them.
    :param opset: ONNX opset version to check against.

    The obfuscated original repeated the parameter name ``_snake_case`` three
    times (a SyntaxError); parameter names are restored from the CLI below.
    """
    saved_model = SavedModel()
    onnx_ops = []

    # Collect every op supported by ONNX up to (and including) the requested opset.
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        # BUG FIX: the original concatenated a str with a list (TypeError);
        # join the op names into the message instead.
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        # BUG FIX: the checker defined above is named `lowercase` in this file;
        # the original called an undefined `onnx_compliancy`.
        lowercase(args.saved_model_path, args.strict, args.opset)
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCAmelCase(ProcessorMixin):
    """Bundles a Speech2Text feature extractor and tokenizer into one processor.

    The obfuscated original gave three methods the same name (so only the last
    survived) and assigned context-manager state to locals instead of ``self``;
    the ProcessorMixin contract names are restored.
    """

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # By default calls are routed to the feature extractor; the
        # as_target_processor() context switches to the tokenizer.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process ``audio`` with the feature extractor and/or ``text`` with the tokenizer.

        When both are given, the tokenized text is attached as ``labels``.
        """
        # For backward compatibility: inside as_target_processor(), forward
        # everything to whichever processor is currently active.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # BUG FIX: the original assigned the ids to a discarded local;
            # they must be attached to the returned inputs as labels.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route ``__call__`` to the tokenizer for processing labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """Builds tiny ESM configs/inputs and runs the shared model checks.

    The obfuscated original assigned the hyper-parameters to throwaway locals
    and named every method identically; the names are restored from the test
    class below, which calls ``TFEsmModelTester`` and these helpers by name.
    """

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a small EsmConfig plus random input ids, mask and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        """Same as above but with decoder mode and encoder outputs to attend to."""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the base model with dict, list and tensor inputs; check output shape."""
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Run the model as a decoder with cross-attention over encoder outputs."""
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the MLM head produces per-token vocabulary logits."""
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head produces per-token label logits."""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Shape the prepared inputs into the (config, inputs_dict) pair the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shared model + pipeline tests for the TF ESM family.

    The obfuscated original bound every class attribute to ``_A`` and every
    test method to ``lowerCAmelCase``; the mixin attribute/method names are
    restored so unittest and the mixins can discover them.
    """

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing outputs of a real pretrained checkpoint.

    The obfuscated original gave both tests the same method name, so unittest
    would only ever run one of them; conventional names are restored.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCamelCase = Features({'''text''': Value('''string''' )} )
_lowerCamelCase = Features({'''labels''': ClassLabel} )
_lowerCamelCase = '''text'''
_lowerCamelCase = '''labels'''
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> int:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] ,lowerCamelCase_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
A = copy.deepcopy(self )
A = self.label_schema.copy()
A = features[self.label_column]
A = label_schema
return task_template
@property
def UpperCamelCase__ ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 719 |
"""simple docstring"""
UpperCAmelCase =256
# Modulus to hash a string
UpperCAmelCase =1_000_003
def _A ( _a : str , _a : str ):
"""simple docstring"""
A = len(_a )
A = len(_a )
if p_len > t_len:
return False
A = 0
A = 0
A = 1
# Calculating the hash of pattern and substring of text
for i in range(_a ):
A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
A = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
A = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _A ( ):
"""simple docstring"""
A = """abc1abc12"""
A = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
A = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(_a , _a ) and not rabin_karp(_a , _a )
# Test 2)
A = """ABABX"""
A = """ABABZABABYABABX"""
assert rabin_karp(_a , _a )
# Test 3)
A = """AAAB"""
A = """ABAAAAAB"""
assert rabin_karp(_a , _a )
# Test 4)
A = """abcdabcy"""
A = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(_a , _a )
# Test 5)
A = """Lü"""
A = """Lüsai"""
assert rabin_karp(_a , _a )
A = """Lue"""
assert not rabin_karp(_a , _a )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 255 | 0 |
'''simple docstring'''
class _snake_case :
def __init__( self ,_snake_case ):
UpperCAmelCase_ : int = val
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
def UpperCamelCase__ ( self ,_snake_case ):
if self.val:
if val < self.val:
if self.left is None:
UpperCAmelCase_ : Tuple = Node(__UpperCAmelCase )
else:
self.left.insert(__UpperCAmelCase )
elif val > self.val:
if self.right is None:
UpperCAmelCase_ : int = Node(__UpperCAmelCase )
else:
self.right.insert(__UpperCAmelCase )
else:
UpperCAmelCase_ : Optional[int] = val
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
"""simple docstring"""
if root:
inorder(root.left , _UpperCamelCase )
res.append(root.val )
inorder(root.right , _UpperCamelCase )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> Any:
"""simple docstring"""
if len(_UpperCamelCase ) == 0:
return arr
UpperCAmelCase_ : Optional[Any] = Node(arr[0] )
for i in range(1 , len(_UpperCamelCase ) ):
root.insert(arr[i] )
# Traverse BST in order.
UpperCAmelCase_ : List[Any] = []
inorder(_UpperCamelCase , _UpperCamelCase )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 71 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=64 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=1 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = q_groups
__lowerCamelCase = k_groups
__lowerCamelCase = v_groups
__lowerCamelCase = post_attention_groups
__lowerCamelCase = intermediate_groups
__lowerCamelCase = output_groups
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = SqueezeBertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = SqueezeBertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = SqueezeBertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
((__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase)) = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase__ = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , dim=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = SqueezeBertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
__lowerCamelCase = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = torch.Size((1, 3) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
| 175 | 0 |
'''simple docstring'''
def lowercase_ ( lowercase__ = 50 ) ->int:
_snake_case: Union[str, Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'{solution() = }')
| 273 |
'''simple docstring'''
A : str = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
A : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def lowercase_ ( lowercase__ ) ->str:
_snake_case: Union[str, Any] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def lowercase_ ( lowercase__ ) ->str:
if set(lowercase__ ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
_snake_case: List[Any] = ''
for word in coded.split():
while len(lowercase__ ) != 0:
decoded += decode_dict[word[:5]]
_snake_case: Optional[int] = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 273 | 1 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_snake_case : List[Any] = "pytorch_model.bin"
_snake_case : Dict = "pytorch_model.bin.index.json"
_snake_case : str = "adapter_config.json"
_snake_case : Dict = "adapter_model.bin"
_snake_case : List[str] = "adapter_model.safetensors"
_snake_case : Dict = "tf_model.h5"
_snake_case : int = "tf_model.h5.index.json"
_snake_case : Union[str, Any] = "model.ckpt"
_snake_case : Dict = "flax_model.msgpack"
_snake_case : Union[str, Any] = "flax_model.msgpack.index.json"
_snake_case : Tuple = "model.safetensors"
_snake_case : Union[str, Any] = "model.safetensors.index.json"
_snake_case : Optional[int] = "config.json"
_snake_case : List[Any] = "preprocessor_config.json"
_snake_case : Optional[int] = FEATURE_EXTRACTOR_NAME
_snake_case : Union[str, Any] = "generation_config.json"
_snake_case : int = "modelcard.json"
_snake_case : Optional[Any] = "▁"
_snake_case : Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_snake_case : Optional[Any] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_snake_case : int = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_snake_case : List[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowerCAmelCase_ ( __lowerCamelCase ):
if version.parse(__lowerCamelCase ) < version.parse(__lowerCamelCase ):
if "dev" in min_version:
__snake_case : List[str] = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
__snake_case : List[Any] = F'This example requires a minimum version of {min_version},'
error_message += F' but the version found is {__version__}.\n'
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers." )
| 81 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_snake_case : Optional[Any] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ["pixel_values"]
def __init__( self : List[Any] , lowerCamelCase : bool = True , lowerCamelCase : Union[int, float] = 1 / 255 , lowerCamelCase : bool = True , lowerCamelCase : int = 8 , **lowerCamelCase : Tuple , ) -> None:
super().__init__(**lowerCamelCase )
__snake_case : Dict = do_rescale
__snake_case : Dict = rescale_factor
__snake_case : Optional[Any] = do_pad
__snake_case : Tuple = pad_size
def __snake_case ( self : Dict , lowerCamelCase : np.ndarray , lowerCamelCase : float , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : Optional[int] ) -> np.ndarray:
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : np.ndarray , lowerCamelCase : int , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ) -> Tuple:
__snake_case , __snake_case : List[str] = get_image_size(lowerCamelCase )
__snake_case : Optional[Any] = (old_height // size + 1) * size - old_height
__snake_case : List[Any] = (old_width // size + 1) * size - old_width
return pad(lowerCamelCase , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=lowerCamelCase )
def __snake_case ( self : Tuple , lowerCamelCase : ImageInput , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[float] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCamelCase : Union[str, Any] , ) -> List[str]:
__snake_case : int = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : str = do_pad if do_pad is not None else self.do_pad
__snake_case : Any = pad_size if pad_size is not None else self.pad_size
__snake_case : int = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
__snake_case : str = [to_numpy_array(lowerCamelCase ) for image in images]
if do_rescale:
__snake_case : Optional[int] = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_pad:
__snake_case : Optional[Any] = [self.pad(lowerCamelCase , size=lowerCamelCase ) for image in images]
__snake_case : int = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__snake_case : Union[str, Any] = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 81 | 1 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_a : Union[str, Any] = datasets.logging.get_logger(__name__)
_a : Tuple = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_a : Optional[int] = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_a : Tuple = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
_a : Optional[int] = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A (datasets.Metric ):
def _snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
def _snake_case ( self , UpperCamelCase_ ):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"Using default BLEURT-Base checkpoint for sequence maximum length 128. "
"You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')." )
__UpperCAmelCase : Any = "bleurt-base-128"
if self.config_name.lower() in CHECKPOINT_URLS:
__UpperCAmelCase : Dict = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__UpperCAmelCase : int = self.config_name.upper()
else:
raise KeyError(
f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
__UpperCAmelCase : Optional[Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
__UpperCAmelCase : Optional[Any] = score.BleurtScorer(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : str = self.scorer.score(references=UpperCamelCase_ , candidates=UpperCamelCase_ )
return {"scores": scores}
| 702 | '''simple docstring'''
def method_a(boundary: list[float], steps: float) -> float:
    """Approximate the integral of ``f`` over ``boundary = [a, b]`` with the
    composite trapezoidal rule using ``steps`` sub-intervals.

    >>> abs(method_a([0.0, 1.0], 10.0) - 0.335) < 1e-9
    True
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    """Yield the interior sample points a+h, a+2h, ... while strictly below b-h.

    NOTE(review): the strict ``<`` bound relies on floating-point drift to emit
    the last interior point (e.g. 0.8999... < 0.9); kept as-is to preserve the
    original numeric behavior.
    """
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    """Integrate f over [0, 1] with 10 steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
| 10 | 0 |
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1''' # defaults region
@dataclass
class SageMakerTestEnvironment:
    """Per-framework settings shared by the SageMaker integration tests.

    ``framework`` is "pytorch" or "tensorflow"; everything else is derived
    from it or constant.
    """

    framework: str
    # execution role for the SageMaker training jobs
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    # base hyperparameters for single-node runs
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    # distributed runs use the same settings but train longer
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> str:
        # PyTorch scripts log eval_* metrics; TF scripts log Keras-style names.
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        # NOTE: "transfromers" typo preserved — it is part of the live job names.
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    """Class-scoped fixture: build a SageMakerTestEnvironment from the test
    class's ``framework`` attribute and attach it as ``cls.env``."""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 36 | """simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of ``array`` summing to ``target``
    (naive exponential recursion; ``n`` is unused, kept for a uniform signature).

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized top-down with a dp array (-1 marks "not computed").

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, iterative bottom-up; ``n`` is the length of ``array``.

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach 0: pick nothing
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 656 | 0 |
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of ``sequence`` via backtracking."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Backtracking helper: at each depth pick one unused element, recurse,
    then undo the choice.  Prints each complete permutation."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # undo the choice before trying the next candidate
            current_sequence.pop()
            index_used[i] = False


if __name__ == "__main__":
    # demo runs guarded so importing this module stays side-effect free
    sequence: list[int | str] = [3, 1, 2, 4]
    generate_all_permutations(sequence)

    sequence_a: list[int | str] = ["A", "B", "C"]
    generate_all_permutations(sequence_a)
| 706 |
import argparse
import os
import re
a__ = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a__ = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
a__ = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping(fname, overwrite: bool = False):
    """Sort the auto-mapping entries of one file alphabetically by identifier.

    If ``overwrite`` is True the file is rewritten in place; otherwise the
    function returns ``True`` when the file would change (and ``None`` if it
    is already sorted).
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # entries of the mapping are indented 8 spaces past the intro line
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    """Sort the mappings of every ``*.py`` file in the auto module.

    Raises ValueError listing the offending files when ``overwrite`` is False
    and at least one file is out of order.
    """
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    # --check_only means "do not overwrite"; default is to fix in place
    sort_all_auto_mappings(not args.check_only)
| 198 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    """Builds a small ASTConfig plus random inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, input_values, labels) with random spectrogram inputs."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the Audio Spectrogram Transformer."""

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    """Download the sample FLAC clip from the Hub and load it with torchaudio.

    Returns (audio_tensor, sampling_rate).
    """
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the finetuned AudioSet checkpoint."""

    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 419 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint names to their hosted configuration files.
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    """Configuration for a BERT model.

    Instantiating with the defaults yields a configuration close to
    ``bert-base-uncased``.  All arguments are stored verbatim as attributes;
    ``pad_token_id`` is forwarded to PretrainedConfig.
    """

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    """ONNX export configuration for BERT: declares the dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 155 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    """Builds a small EsmConfig plus random token inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ESM."""

    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Position ids start at padding_idx + 1 and padding tokens keep padding_idx."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """With embeds (no ids), positions are sequential starting at padding_idx + 1."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    """Slow integration tests against the esm2_t6_8M checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33

            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 707 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class UpperCAmelCase(Trainer):
    """Question-answering Trainer with quantization-aware-training support:
    calibration of quantized ops, post-processed evaluation/prediction, and
    ONNX export.

    NOTE(review): in the source every method was named ``lowerCamelCase_`` (so
    later defs silently shadowed earlier ones) and the class inherited from the
    module logger; names are restored to the ones the bodies themselves call
    (``self.get_calib_dataloader``, ``Trainer``).
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        """Store QA-specific hooks on top of the stock Trainer.

        Args:
            eval_examples: raw (un-tokenized) eval examples for post-processing.
            post_process_function: maps (examples, features, predictions) to metrics input.
            quant_trainer_args: options forwarded to the ``quant_trainer`` helpers.
        """
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a shuffled DataLoader over the calibration dataset."""
        # NOTE(review): `self.calib_dataset` is read here but never assigned in
        # __init__ — callers are expected to pass `calib_dataset` explicitly.
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        """Run forward passes over ~``calib_num`` samples to calibrate the
        quantizer ranges, then freeze them."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step (logits are needed by the observers, so no loss-only mode)
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            # Stop once enough samples have flowed through the quantizer observers.
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate with QA post-processing; returns the metrics dict."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            # Always restore the metric hook, even if the eval loop raised.
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict with QA post-processing; returns a PredictionOutput."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (quantized) model to ``<output_dir>/model.onnx``."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 181 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters for VivitImageProcessor tests and builds the
    kwargs dict used to instantiate the processor.

    Renamed from the obfuscated ``UpperCamelCase_``: the test class below
    instantiates ``VivitImageProcessingTester(self)`` and calls
    ``prepare_image_processor_dict()``, which were otherwise undefined.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        crop_size=None,
    ):
        # Avoid mutable list defaults; fall back to the standard values.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs for constructing a VivitImageProcessor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class UpperCamelCase_(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``VivitImageProcessor`` over PIL, numpy, and torch video inputs.

    Restored from obfuscation: the base class is the imported
    ``ImageProcessingSavingTestMixin`` (was the undefined ``UpperCamelCase__``),
    the class attribute is ``image_processing_class`` (read by the methods and
    the mixin), and the methods carry ``setUp``/``test_*`` names so the test
    runner actually discovers them.
    """

    # Processor under test; None when vision deps are missing (tests then skip).
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCamelCase__ = re.compile(r'''\s+''')
def get_hash(example):
    """Return the md5 hash of an example's whitespace-stripped content.

    Fixes: ``hashlib.mda`` does not exist (md5 intended), and the regex
    argument was the example dict instead of the whitespace pattern.
    Renamed from ``lowerCamelCase__`` — ``preprocess`` calls ``get_hash``.
    """
    content = re.sub(r"\s+", "", example["content"])
    return {"hash": hashlib.md5(content.encode("utf-8")).hexdigest()}
def line_stats(example):
    """Return mean and max line length of the example's content.

    Renamed from ``lowerCamelCase__`` — ``preprocess`` calls ``line_stats``.
    """
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Return the fraction of alphanumeric characters in the content.

    Renamed from ``lowerCamelCase__`` — ``preprocess`` calls ``alpha_stats``.
    """
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Consume the example's hash from ``uniques``; True only the first time
    that hash is seen (later duplicates return False).

    Renamed from ``lowerCamelCase__`` — ``filter`` calls ``check_uniques``.
    """
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    return False
def is_autogenerated(example, scan_width=5):
    """Check the first ``scan_width`` lines for auto-generation markers.

    Rewritten without the confusing for/else (whose binding was ambiguous in
    the source) and renamed from ``lowerCamelCase__`` — ``preprocess`` calls
    ``is_autogenerated``.
    """
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        if any(keyword in line.lower() for keyword in keywords):
            return {"autogenerated": True}
    # No marker in the scanned prefix (also covers empty content, which the
    # for/else variant could miss).
    return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Heuristically detect config or test files.

    First test: an explicit marker in the first ``scan_width`` lines.
    Second test: density of "config"/"test" mentions above ``coeff * nlines``.
    Renamed from ``lowerCamelCase__`` — ``preprocess`` calls ``is_config_or_test``.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        if any(keyword in line.lower() for keyword in keywords):
            return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """True when no line contains a basic Python keyword (def/class/for/while).

    Renamed from ``lowerCamelCase__`` — ``preprocess`` calls ``has_no_keywords``.
    """
    keywords = ["def ", "class ", "for ", "while "]
    for line in example["content"].splitlines():
        lowered = line.lower()
        if any(keyword in lowered for keyword in keywords):
            return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """True when the content has at most ``minimum`` '=' characters.

    Renamed from ``lowerCamelCase__`` — ``preprocess`` calls
    ``has_few_assignments``.
    """
    counter = 0
    for line in example["content"].splitlines():
        counter += line.lower().count("=")
        # Early exit once the threshold is exceeded.
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the content using the module-level
    ``tokenizer`` (assigned by the script below).

    Renamed from ``lowerCamelCase__`` — ``preprocess`` calls ``char_token_ratio``.
    The source passed the example dict as ``truncation``; the intended value is
    ``False`` so the whole content is tokenized.
    """
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Aggregate all per-example statistics used by the filters.

    Renamed from ``lowerCamelCase__`` — the script calls ``ds.map(preprocess, ...)``.
    """
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):  # noqa: A001 - shadows builtin; name kept because the script calls ds.filter(filter, ...)
    """Apply all heuristics in order; True keeps the example.

    Probabilistic branches draw from ``np.random`` only when reached, so the
    original short-circuit order is preserved exactly.
    """
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Gzip-compress ``file_path`` to ``<file_path>.gz`` and delete the original.

    Renamed from ``lowerCamelCase__`` — the script calls ``compress_file(file_path)``.
    """
    with open(file_path, "rb") as f_in, gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
        shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
# Restored variable names: the source bound everything to `UpperCamelCase__`,
# leaving `parser`, `args`, `ds`, `uniques`, `ds_filter`, etc. undefined.
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 268 | 0 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a, whose positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Return a starting point >= sqrt(a) by repeated squaring from 2."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Square root of ``a`` via Newton's method on f(x) = x^2 - a.

    The source defined all four functions as ``__UpperCamelCase`` (each
    shadowing the last) with duplicate ``_A`` parameters; names restored to
    those the iteration body calls.

    Raises:
        ValueError: for negative input ("math domain error").
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        # Newton step: x_{n+1} = x_n - f(x_n) / f'(x_n)
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 496 | '''simple docstring'''
import re
def __UpperCamelCase(dna: str):
    """Return the Watson-Crick complement of a DNA strand (A<->T, C<->G).

    The source body referenced an undefined name ``dna`` while the parameter
    was ``_A``; the parameter is renamed to match so the function runs.

    Raises:
        ValueError: if the strand contains characters other than A/T/C/G.
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    # str.maketrans builds the one-pass base-swap table.
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 496 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure. Restored from obfuscation: every optional section was
# rebinding a single `lowercase_` name (clobbering this dict), and the final
# `_LazyModule(...)` referenced the then-undefined `_import_structure`.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

# Each optional backend adds its symbols only when the dependency is present.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert the gradients of two models are in sync iff ``did_step``.

    Renamed from ``__lowerCAmelCase`` — the test functions below call
    ``check_model_parameters(...)``.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    """One MSE training step; backward via accelerator unless ``do_backward``
    is False, in which case the loss is manually scaled and back-propagated.

    Renamed from ``__lowerCAmelCase`` — the test functions call ``step_model``.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # Mirror the scaling Accelerator would apply during accumulation.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return everything needed to perform basic training: a reference model,
    its DDP-prepared deep copy, a dataloader, and (optionally) optimizers and
    LR schedulers for both.

    Renamed from ``__lowerCAmelCase`` — the test functions call
    ``get_training_setup``.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single device, `no_sync` is a noop: grads always match.

    Renamed from ``__lowerCAmelCase`` — the entry point calls ``test_noop_sync``.
    """
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """On a distributed setup, grads sync only outside `no_sync`.

    Renamed from ``__lowerCAmelCase`` — the entry point calls
    ``test_distributed_sync``.
    """
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """Grads sync only on accumulation boundaries or the last batch.

    Renamed from ``__lowerCAmelCase`` — the entry point calls
    ``test_gradient_accumulation``.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Accumulation with optimizer + scheduler keeps LRs aligned between the
    reference and DDP copies.

    Renamed from ``__lowerCAmelCase`` — the entry point calls this function.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process to mirror the prepared sched.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """Verify that ``GradientState`` tracks the currently active (prepared)
    dataloader, including when a second dataloader is iterated inside the loop
    over the first, and that the state resets once iteration finishes."""
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested iteration: the inner dataloader becomes the active one.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Run the gradient-sync test battery appropriate for the current
    distributed configuration (NO / MULTI_CPU / MULTI_GPU)."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                # (False, False) is already covered by the unconditional run above.
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    """Entry point for TPU spawning; `index` is required by the spawner and unused."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Direct (non-spawned) invocation of the test battery.
    main()
| 413 | 1 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
# Root logger used by the doc-example tests below (they call `logger.info`).
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""")
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
    """Runs the doctests embedded in transformers source files and in the docs."""

    def analyze_directory(
        self,
        directory,
        identifier=None,
        n_identifier=None,
        ignore_files=None,
        only_modules=True,
    ):
        """Run doctests for every file in `directory` that passes the filters.

        Args:
            directory: path whose direct (non-directory) files are scanned.
            identifier: if given, keep only files containing this substring.
            n_identifier: substring (or list of substrings) of files to exclude.
            ignore_files: explicit file names to skip; ``__init__.py`` is always
                skipped. NOTE: the provided list is mutated in place.
            only_modules: if True, resolve each file as an attribute of the
                `transformers` package and run its doctest suite; otherwise run
                ``doctest.testfile`` on the raw file.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.')
            else:
                # `'..' / directory` works via Path.__rtruediv__.
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformer_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformer_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformer_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformer_directory, identifier=files)

    def test_configuration_examples(self):
        transformer_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformer_directory, identifier=files)

    def test_remaining_examples(self):
        transformer_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformer_directory, n_identifier=n_identifiers)

    def test_doc_examples(self):
        doc_source_dir = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_dir, ignore_files=ignore_files, only_modules=False)
| 720 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Build a `SwinConfig` matching a timm Swin checkpoint name such as
    ``swin_tiny_patch4_window7_224`` (model size, window size, image size and
    label set are all parsed from the name)."""
    config = SwinConfig()
    name_split = swin_name.split('_')

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    # Attach human-readable ImageNet-1k labels (fetched from the Hub).
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Map a timm Swin state-dict key to the transformers naming scheme."""
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        # The classification head keeps its top-level position.
        name = name.replace('head', 'classifier')
    else:
        name = 'swin.' + name
    return name


def convert_state_dict(orig_state_dict, model):
    """Rewrite a timm Swin state dict in place so it can be loaded into `model`.

    Fused qkv projections are split into separate query/key/value tensors whose
    size is read from the target model; relative-position masks are dropped.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Download the timm checkpoint `swin_name`, convert it into a transformers
    `SwinForImageClassification`, verify both models agree on a sample image,
    and save the model plus its image processor to `pytorch_dump_folder_path`."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_', '-')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f'Saving model {swin_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 20 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class _snake_case( unittest.TestCase , ToolTesterMixin ):
    """Tests for the ``text-to-speech`` tool (SpeechT5-based)."""

    def setUp(self):
        self.tool = load_tool('text-to-speech')
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 is not deterministic unless we fix the seed.
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]),
            )
        )
| 531 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file (``node1 node2 distance`` per line) into an
    adjacency dict mapping each node to a list of ``[neighbour, distance]``
    pairs (distances kept as strings, as read)."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def _A(path, dict_of_neighbours):
    """Deprecated placeholder kept only to avoid import errors; see the named
    functions above/below."""


def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting (and ending) at the first
    character of the file at `path`.

    Returns:
        (tour, total_distance) where `tour` is a list of node names with the
        start node repeated at the end.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000  # sentinel larger than any edge weight
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    # The final greedy step accumulated the 10000 sentinel; replace it with the
    # real closing edge back to the start node.
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Return every tour obtained from `solution` by swapping two interior
    nodes, each with its total distance appended as the last element, sorted by
    that distance (ascending)."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list holding at most
    `size` recent swaps.

    Returns:
        (best_tour, best_distance) — the best solution ever seen.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended distance
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: fall through to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

            if len(tabu_list) >= size:
                tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    """CLI driver: build the neighbour map, a greedy initial tour, then improve
    it with tabu search and print the result."""
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 531 | 1 |
"""simple docstring"""
# Cell inserted at the top of every auto-generated notebook (Italian docs).
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cells of every generated notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder patterns that the `black` reformatting step must leave untouched.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 263 |
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of
    the squares of the first `n` natural numbers.

    >>> solution(10)
    2640
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    # Print the answer for the default n = 100.
    print(F"""{solution() = }""")
| 263 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Make the repo's `utils` directory importable so we can test `check_copies`.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for the `check_copies` utility (copy-consistency checks and README
    model-list localization)."""

    def setUp(self):
        # Work inside a scratch copy of the repo layout so overwrite tests are safe.
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        # NOTE(review): pointing check_copies at the scratch dir mirrors the
        # upstream test; confirm the attribute name against utils/check_copies.py.
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        # Restore the real source path and clean up the scratch directory.
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `class_code` under `comment` into a temp module and verify that
        `check_copies.is_copy_consistent` accepts it; when `overwrite_result`
        is given, run with overwrite and compare the rewritten file against it."""
        code = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        # NOTE(review): the black target version was mangled in the source
        # ("PYaa"); PY35 is used here — confirm against the repo's black config.
        code_style = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=code_style)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        link_unchanged_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 169 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __magic_name__ :
    """Helper that builds configs/inputs and runs shape checks for the TF
    LayoutLM model tests (the test case delegates to this tester)."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        # `parent` is the unittest.TestCase that owns this tester.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase = bbox[i, j, 3]
UpperCAmelCase = bbox[i, j, 1]
UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase = bbox[i, j, 2]
UpperCAmelCase = bbox[i, j, 0]
UpperCAmelCase = t
UpperCAmelCase = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = LayoutLMConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : str ):
UpperCAmelCase = TFLayoutLMModel(config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def create_and_check_for_masked_lm(
    self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the masked-LM head output shape (batch, seq_len, vocab_size)."""
    model = TFLayoutLMForMaskedLM(config=config)
    result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
    self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the sequence-classification head output shape (batch, num_labels)."""
    config.num_labels = self.num_labels
    model = TFLayoutLMForSequenceClassification(config=config)
    result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
    self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the token-classification head output shape (batch, seq_len, num_labels)."""
    config.num_labels = self.num_labels
    model = TFLayoutLMForTokenClassification(config=config)
    result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
    self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check start/end logit shapes of the extractive-QA head: (batch, seq_len) each."""
    model = TFLayoutLMForQuestionAnswering(config=config)
    result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
    """Return ``(config, inputs_dict)`` in the format expected by the common test mixin.

    Fixes the obfuscated original, which unpacked into a single reused name and then
    referenced undefined variables (``config_and_inputs``, ``input_ids``, ...).
    """
    (
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = self.prepare_config_and_inputs()
    inputs_dict = {
        "input_ids": input_ids,
        "bbox": bbox,
        "token_type_ids": token_type_ids,
        "attention_mask": input_mask,
    }
    return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the TF LayoutLM model family.

    NOTE(review): the original declared undefined bases ``_a``; the standard HF test
    mixins are assumed here — confirm against this file's import block.
    """

    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Only the first archive entry is fetched to keep the slow test short.
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    """Return a fixed two-example batch for the slow integration tests below.

    Fixes the obfuscated original, whose ``return`` referenced names that were never
    assigned (every assignment targeted the same throwaway identifier).
    """
    # fmt: off
    input_ids = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released ``microsoft/layoutlm-base-uncased`` weights."""

    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_pooled = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_pooled, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # the loss is one value per batch element
        loss = outputs.loss
        self.assertEqual(loss.shape, (2,))
        # logits: one row per example, one column per label
        logits = outputs.logits
        self.assertEqual(logits.shape, (2, 2))

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=labels,
        )
        # logits: (batch, seq_len, num_labels)
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized QA head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # start/end logits: (batch, seq_len)
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 333 | 0 |
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo image and return it as a normalized (1, 3, H, W) float tensor.

    The obfuscated original declared two parameters with the same name (SyntaxError)
    and passed an undefined name as ``stream=``.
    """
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # CLIP-style normalization constants used by BLIP.
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """Translate one original-BLIP state-dict key into the equivalent HF Blip key.

    Restores the name the conversion loop actually calls (the obfuscated original was
    named identically to ``load_demo_image``, shadowing it). Rewritten as an ordered
    rule table: (guard substring, regex pattern, replacement). Order matters — e.g.
    "attn" must become "self_attn" before the final "self_attn.proj" rule can fire.
    """
    rules = [
        ("visual_encoder", r"visual_encoder*", "vision_model.encoder"),
        ("blocks", r"blocks", "layers"),
        ("attn", r"attn", "self_attn"),
        ("norm1", r"norm1", "layer_norm1"),
        ("norm2", r"norm2", "layer_norm2"),
        ("encoder.norm", r"encoder.norm", "post_layernorm"),
        ("encoder.patch_embed.proj", r"encoder.patch_embed.proj", "embeddings.patch_embedding"),
        ("encoder.pos_embed", r"encoder.pos_embed", "embeddings.position_embedding"),
        ("encoder.cls_token", r"encoder.cls_token", "embeddings.class_embedding"),
        ("self_attn", r"self_attn.proj", "self_attn.projection"),
    ]
    for guard, pattern, replacement in rules:
        if guard in key:
            key = re.sub(pattern, replacement, key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Convert the original BLIP captioning/VQA/ITM checkpoints to HF format.

    Fixes the obfuscated original: the two parameters shared one name (SyntaxError)
    and every local was an undefined placeholder. Downloads the three released
    Salesforce checkpoints, renames their state-dict keys, verifies outputs on the
    demo image, and (optionally) saves the converted models.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()

    # --- captioning model ---
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # --- VQA model ---
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    # --- image-text matching (retrieval) model ---
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    # Reference values produced by the original Salesforce implementation.
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    # The original called the converter with `args.checkpoint_path`, an attribute that
    # was never registered with the parser (AttributeError), and with one argument too
    # many for the converter's (dump_folder, config_path) signature.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 719 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """Builds tiny random Flaubert configs/inputs and checks each TF head's output shapes.

    Restores the class name referenced by the test case below (``TFFlaubertModelTester``);
    in the obfuscated original every method was named ``__lowercase`` (so later defs
    silently overwrote earlier ones) and each signature repeated one parameter name,
    which is a SyntaxError.
    """

    def __init__(self, parent):
        self.parent = parent
        # Tiny hyper-parameters keep every test fast.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        """Return a FlaubertConfig plus randomly generated model inputs and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the base model with both dict and list call signatures."""
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the LM head: logits of shape (batch, seq_len, vocab_size)."""
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the simple QA head: start/end logits of shape (batch, seq_len)."""
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the sequence-classification head: logits (batch, type_sequence_label_size)."""
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the token-classification head: logits (batch, seq_len, num_labels)."""
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Check the multiple-choice head: logits (batch, num_choices)."""
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        # Tile each input to (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the format expected by the common test mixin."""
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the TF Flaubert model family.

    NOTE(review): the original declared undefined bases ``__A``; the mixins imported
    at the top of this file are assumed here.
    """

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Only the first archive entry is fetched to keep the slow test short.
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released ``jplu/tf-flaubert-small-cased`` weights."""

    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        # The original passed an undefined placeholder to `model(...)` and used the
        # nonexistent dtypes `tf.intaa` / `tf.floataa` (should be int32 / float32).
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8_768_773, -1.566_555, 0.27_072_418],
                    [-1.6_920_038, -0.5_873_505, 1.9_329_599],
                    [-2.9_563_985, -1.6_993_835, 1.7_972_052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 411 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    """Dataclass helper: a field whose default is produced by a factory, with metadata.

    Restores the name used by the dataclass below (``list_field``); the obfuscated
    original declared both keyword parameters with the same name (SyntaxError).
    NOTE(review): the factory returns the *same* ``default`` object on every call,
    so instances share one list — preserved as-is to keep behavior unchanged.
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class _lowercase :
    """Deprecated benchmark CLI arguments for the HF benchmarking utilities.

    NOTE(review): reconstructed from a mangled version in which every field
    was declared under one shared name (so only the last survived) and the
    ``list_field`` alias did not exist. Field names and defaults follow the
    upstream ``BenchmarkArguments`` — confirm against
    ``transformers.benchmark.benchmark_args_utils``.
    """

    # Model identifiers to benchmark; empty means "all base models".
    models: List[str] = _lowerCamelCase(
        default=[],
        metadata={
            'help': (
                'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
                ' of all available models'
            )
        },
    )
    batch_sizes: List[int] = _lowerCamelCase(
        default=[8],
        metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'},
    )
    sequence_lengths: List[int] = _lowerCamelCase(
        default=[8, 32, 128, 512],
        metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'},
    )
    inference: bool = field(
        default=True,
        metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'},
    )
    cuda: bool = field(
        default=True,
        metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'},
    )
    tpu: bool = field(
        default=True,
        metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'},
    )
    fp16: bool = field(default=False, metadata={'help': 'Use FP16 to accelerate inference.'})
    training: bool = field(default=False, metadata={'help': 'Benchmark training of model'})
    verbose: bool = field(default=False, metadata={'help': 'Verbose memory tracing'})
    speed: bool = field(
        default=True,
        metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'},
    )
    memory: bool = field(
        default=True,
        metadata={
            'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={'help': 'Trace memory line by line'})
    save_to_csv: bool = field(default=False, metadata={'help': 'Save result to a CSV file'})
    log_print: bool = field(default=False, metadata={'help': 'Save all print statements in a log file'})
    env_print: bool = field(default=False, metadata={'help': 'Whether to print environment information'})
    multi_process: bool = field(
        default=True,
        metadata={
            'help': (
                'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
                ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
                ' for debugging / testing and on TPU.'
            )
        },
    )
    # CSV/log file names default to timestamped paths so repeated runs do not
    # clobber each other (evaluated once, at class-definition time).
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving time results to csv.'},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving memory results to csv.'},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving time results to csv for training.'},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving memory results to csv for training.'},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving environment information.'},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={'help': 'Log filename used if print statements are saved in log.'},
    )
    repeat: int = field(default=3, metadata={'help': 'Times an experiment will be run.'})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            'help': (
                'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
                ' model weights.'
            )
        },
    )

    def __post_init__(self):
        """Warn that the whole benchmarking toolbox is deprecated."""
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serialize the arguments to a pretty-printed JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        """Validated list of model identifiers; raises when none were given."""
        if len(self.models) <= 0:
            raise ValueError(
                'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        """Use multiprocessing only when enabled and not running on TPU."""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info('Multiprocessing is currently not possible on TPU.')
            return False
        else:
            return True
| 203 |
def _lowerCamelCase ( SCREAMING_SNAKE_CASE = 1000 ):
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 203 | 1 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """A univariate polynomial; ``coefficients[i]`` multiplies ``x**i``.

    NOTE(review): restored from a mangled version with duplicate ``__A``
    parameters (a SyntaxError), three methods sharing one name, and internal
    constructor calls referencing ``Polynomial`` — which fixes the class name.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Store the degree and a defensive copy of the coefficient list."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: "Polynomial") -> "Polynomial":
        """Coefficient-wise sum; the result has the larger degree."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: "Polynomial") -> "Polynomial":
        """Subtraction implemented as addition of the (-1)-scaled polynomial."""
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> "Polynomial":
        """Negate every coefficient."""
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: "Polynomial") -> "Polynomial":
        """Schoolbook (convolution) product."""
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution):
        """Evaluate the polynomial at *substitution* by direct power sums."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution ** i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest power first; zero terms are skipped."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                # " + " only between terms, never as a leading sign.
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> "Polynomial":
        """First derivative: coefficient i+1 scaled by i+1, degree drops by 1."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0) -> "Polynomial":
        """Antiderivative with integration constant *constant* (degree + 1)."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        """Equal iff same degree and identical coefficient values."""
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
| 718 |
"""simple docstring"""
def _snake_case ( lowercase__ : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[str] = 0
while num > 0:
digit_sum += num % 1_0
num //= 1_0
return digit_sum
def _snake_case ( lowercase__ : int = 1_0_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = 1
lowerCAmelCase_ :int = 2
for i in range(2 , max_n + 1 ):
lowerCAmelCase_ :Optional[Any] = pre_numerator
lowerCAmelCase_ :int = 2 * i // 3 if i % 3 == 0 else 1
lowerCAmelCase_ :str = cur_numerator
lowerCAmelCase_ :List[Any] = e_cont * pre_numerator + temp
return sum_digits(lowercase__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 256 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-wide logger for this image processor.
UpperCamelCase__ = logging.get_logger(__name__)

# PIL is an optional dependency; import it only when the vision extra is installed.
if is_vision_available():
    import PIL
class _UpperCAmelCase ( BaseImageProcessor ):
    r"""
    CLIP-style image processor: optionally convert to RGB, resize the shortest
    edge, center-crop, rescale to [0, 1], and normalize with CLIP mean/std.

    NOTE(review): reconstructed from a name-mangled original in which every
    method shared one name, every parameter list repeated one identifier (a
    SyntaxError), and ``__init__`` never assigned to ``self``. Method names
    follow the calls visible in ``preprocess`` (``self.resize``, …); the base
    class is the imported ``BaseImageProcessor``.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        """Store the preprocessing configuration, filling in CLIP defaults."""
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # CLIP's published normalization statistics are the fallback.
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the image's shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize per channel: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample=None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one image or a batch; per-call arguments
        override the instance defaults set in ``__init__``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 620 | # flake8: noqa
# Lint as: python3
# Public names re-exported by this utility module.
_UpperCAmelCase = [
    """VerificationMode""",
    """Version""",
    """disable_progress_bar""",
    """enable_progress_bar""",
    """is_progress_bar_enabled""",
    """experimental""",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 558 | 0 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """FlaxModelTesterMixin harness for ``FlaxAutoencoderKL``.

    NOTE(review): local and method names restored from a mangled version in
    which every assignment targeted one name; ``dummy_input`` is referenced by
    the sibling method, which pins the property name. The ``snake_case``
    attribute is kept as-is — presumably the mixin's model-class hook
    (upstream calls it ``model_class``); confirm against the mixin.
    """

    snake_case = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """A minimal random batch plus PRNG key for the Flax VAE."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        """Init kwargs for a tiny AutoencoderKL plus the matching dummy inputs."""
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 712 |
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
# NOTE(review): names restored from a mangled version in which every
# assignment targeted one identifier, leaving REPO_PATH, doctest_file_path,
# path, and both accumulator lists undefined.
REPO_PATH = "."

if __name__ == "__main__":
    # One repo-relative path per line; every entry must exist and the file
    # must be kept alphabetically sorted.
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 110 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-wide logger for this image processor.
_snake_case : List[str] = logging.get_logger(__name__)

# PIL is an optional dependency; import it only when the vision extra is installed.
if is_vision_available():
    import PIL
class A ( BaseImageProcessor ):
    r"""
    CLIP-style image processor (RGB-convert, shortest-edge resize, center
    crop, rescale, normalize) producing a ``BatchFeature`` of pixel values.

    NOTE(review): reconstructed from a name-mangled original (duplicate
    parameter names, all methods sharing one name, ``__init__`` assigning to
    locals instead of ``self``). Method names are those invoked inside
    ``preprocess``; the base class is the imported ``BaseImageProcessor``.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        """Record the preprocessing configuration, defaulting to CLIP values."""
        super().__init__(**kwargs)
        default_size = size if size is not None else {'''shortest_edge''': 224}
        default_size = get_size_dict(default_size, default_to_square=False)
        default_crop = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        default_crop = get_size_dict(default_crop, default_to_square=True, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = default_size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = default_crop
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to CLIP's published normalization statistics.
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize the shortest edge to ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Scale pixel values by *scale* (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Channel-wise normalization ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample=None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline to one image or a batch; any per-call
        argument overrides the instance default."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='''size''', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 22 | import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): the two class attributes previously shared one mangled
    # name (first was clobbered); names follow diffusers' block-tester
    # convention — confirm against the mixin.
    block_class = DownBlockaD  # noqa F405
    block_type = 'down'

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = ResnetDownsampleBlockaD  # noqa F405
    block_type = 'down'

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = AttnDownBlockaD  # noqa F405
    block_type = 'down'

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = CrossAttnDownBlockaD  # noqa F405
    block_type = 'down'

    def prepare_init_args_and_inputs_for_common(self):
        """Common init args with the cross-attention width pinned to 32."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # Restored from the mangled `... = 3_2`; key name per the upstream
        # diffusers test — TODO confirm.
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = SimpleCrossAttnDownBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        """Dummy input including encoder hidden states (cross-attention block)."""
        # `True` restored from a mangled constant — TODO confirm against the mixin.
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        """Common init args with the cross-attention width pinned to 32."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = SkipDownBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        """Dummy input including the skip sample this block consumes."""
        # `True` restored from a mangled constant — TODO confirm.
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = AttnSkipDownBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        """Dummy input including the skip sample this block consumes."""
        # `True` restored from a mangled constant — TODO confirm.
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = DownEncoderBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        """Encoder-style block: no time embedding in the dummy input."""
        # `False` restored from a mangled constant — TODO confirm.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        """Explicit tiny channel config plus the matching dummy inputs."""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = AttnDownEncoderBlockaD  # noqa F405
    block_type = 'down'

    @property
    def dummy_input(self):
        """Encoder-style block: no time embedding in the dummy input."""
        # `False` restored from a mangled constant — TODO confirm.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        """Explicit tiny channel config plus the matching dummy inputs."""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = UNetMidBlockaD  # noqa F405
    block_type = 'mid'

    def prepare_init_args_and_inputs_for_common(self):
        """Explicit tiny channel/temb config plus the matching dummy inputs."""
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = UNetMidBlockaDCrossAttn  # noqa F405
    block_type = 'mid'

    def prepare_init_args_and_inputs_for_common(self):
        """Common init args with the cross-attention width pinned to 32."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # Restored from the mangled `... = 3_2`; key name per the upstream
        # diffusers test — TODO confirm.
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = 'mid'

    @property
    def dummy_input(self):
        """Dummy input including encoder hidden states (cross-attention block)."""
        # `True` restored from a mangled constant — TODO confirm.
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        """Common init args with the cross-attention width pinned to 32."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = UpBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        """Up blocks consume residual hidden states from the down path."""
        # `True` restored from a mangled constant — TODO confirm.
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        """Up blocks consume residual hidden states from the down path."""
        # `True` restored from a mangled constant — TODO confirm.
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)
class A ( UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): attribute/method names restored from mangled duplicates.
    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = 'up'

    @property
    def dummy_input(self):
        """Up blocks consume residual hidden states from the down path."""
        # `True` restored from a mangled constant — TODO confirm.
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        """Common init args with the cross-attention width pinned to 32."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """Compare a slice of the block output against reference values."""
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)
class SimpleCrossAttnUpBlockaDTests(UpperCAmelCase_, unittest.TestCase):
    """Output test for the simple cross-attention up block."""

    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # needs both the residual tuple and encoder hidden states
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class AttnUpBlockaDTests(UpperCAmelCase_, unittest.TestCase):
    """Output test for the attention up block."""

    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)
class SkipUpBlockaDTests(UpperCAmelCase_, unittest.TestCase):
    """Output test for the skip-connection up block."""

    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)
class AttnSkipUpBlockaDTests(UpperCAmelCase_, unittest.TestCase):
    """Output test for the attention skip-connection up block."""

    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlockaDTests(UpperCAmelCase_, unittest.TestCase):
    """Output test for the decoder up block (no timestep embedding)."""

    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # decoder blocks take no timestep embedding
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)
class AttnUpDecoderBlockaDTests(UpperCAmelCase_, unittest.TestCase):
    """Output test for the attention decoder up block (no timestep embedding)."""

    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 486 | 0 |
"""simple docstring"""
from collections import defaultdict
def _snake_case ( UpperCAmelCase_ : int ):
A__ = 1
A__ = True
for v in tree[start]:
if v not in visited:
ret += dfs(UpperCAmelCase_ )
if ret % 2 == 0:
cuts.append(UpperCAmelCase_ )
return ret
def _snake_case ( ):
dfs(1 )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Any = 1_0, 9
SCREAMING_SNAKE_CASE_ : Any = defaultdict(list)
SCREAMING_SNAKE_CASE_ : dict[int, bool] = {}
SCREAMING_SNAKE_CASE_ : list[int] = []
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 500 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : List[str] ):
A__ = [0] * len(UpperCAmelCase_ )
A__ = []
A__ = [1] * len(UpperCAmelCase_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(UpperCAmelCase_ ) ):
if indegree[i] == 0:
queue.append(UpperCAmelCase_ )
while queue:
A__ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
A__ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(UpperCAmelCase_ )
print(max(UpperCAmelCase_ ) )
# Adjacency list of Graph
SCREAMING_SNAKE_CASE_ : Any = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 500 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests with randomly initialized dummy weights for the Kandinsky 2.2 controlnet img2img pipeline."""

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny randomly-seeded UNet matching the Kandinsky 2.2 controlnet architecture."""
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble all pipeline components with deterministic dummy weights."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs (embeddings, init image, hint, generator) for `device`."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # mps generators must be created through torch.manual_seed
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        """Compare a 64x64 output slice against the recorded reference values."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    """GPU integration test comparing pipeline output against a reference image from the Hub."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        # HWC uint8 image -> normalized NCHW float tensor expected by the controlnet
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt=""
        ).to_tuple()
        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 252 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u-1) * ... * (u-p+1), the product used by Newton's forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """Interactively build a forward-difference table and interpolate at a requested point."""
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    # pre-fill the table with placeholders; column 0 is overwritten with user input below
    for i in range(n):
        for j in range(n):
            y[i].append(j)

    summ = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
| 252 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __a(unittest.TestCase, ToolTesterMixin):
    """Tests for the `text-to-speech` tool (SpeechT5-based)."""

    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic, so pin the seed before generating
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic, so pin the seed before generating
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 706 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __a(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the XLM BPE tokenizer."""

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenize against the tiny BPE vocab written in setUp."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """XLM wraps single sequences as [0] x [1] and pairs as [0] x [1] y [1]."""
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 222 | 0 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class UpperCAmelCase_(unittest.TestCase):
    """Tests for GradientAccumulator, including under a MirroredStrategy."""

    def assertListAlmostEqual(self, list1, list2, tol):
        """Element-wise almost-equal assertion for two equal-length lists."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # passing a different number of gradients than the first call must fail
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        # force a fresh eager context so two logical CPU devices can be configured
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        # learning rate is tiny, so the variable barely moves
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 5 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__A =2
class Dictionary:
    """A symbol <-> consecutive-integer mapping, mirroring fairseq's Dictionary."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # out-of-range indices map to the unknown word, matching fairseq behavior
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a file of '<symbol> <count>' lines (path or file object)."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add `word` with count `n`, or bump its count if present; return its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # no metadata header in this format; symbol data starts at line 0
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file path or an open file object."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """Convert fairseq BPE vocab keys to HF convention, preserving the special tokens.

    (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    """
    da = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens, which must keep their original spelling
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official fairseq BioGPT checkpoint directory into a HF BioGPT model dump.

    `biogpt_checkpoint_path` must contain `checkpoint.pt`, `dict.txt` and `bpecodes`;
    the vocab, merges, configs and weights are written to `pytorch_dump_folder_path`.
    """
    json_indent = __A  # module-level json indentation constant

    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # rename fairseq parameter names to the HF BioGPT layout
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__A =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path) | 407 | 0 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _a ( _UpperCAmelCase ):
a_ : List[Any] = None
a_ : Optional[Any] = None
@property
def _UpperCamelCase ( self : Optional[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A_ , 'feature_size' ) )
self.assertTrue(hasattr(A_ , 'sampling_rate' ) )
self.assertTrue(hasattr(A_ , 'padding_value' ) )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(A_ ) == len(A_ ) for x, y in zip(A_ , processed_features[input_name] ) ) )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCamelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCamelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
lowerCamelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ):
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ):
return False
return True
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = self.feat_extract_tester.seq_length_diff
lowerCamelCase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowerCamelCase__ = self.feat_extract_tester.min_seq_length
lowerCamelCase__ = self.feat_extract_tester.batch_size
lowerCamelCase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCamelCase__ = feat_extract.pad(A_ , padding=A_ )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )
lowerCamelCase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='max_length' )[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=A_ , return_tensors='np' )
lowerCamelCase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase__ = feat_extract.pad(A_ , pad_to_multiple_of=10 )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , pad_to_multiple_of=10 )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , pad_to_multiple_of=10 , max_length=A_ )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , pad_to_multiple_of=10 , max_length=A_ , return_tensors='np' , )
lowerCamelCase__ = input_a[input_name]
self.assertTrue(all(len(A_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
lowerCamelCase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(A_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCamelCase__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=False ):
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ):
return False
return True
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=A_ )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowerCamelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to smallest with np
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=A_ , )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowerCamelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to middle
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=A_ , return_tensors='np' , )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=A_ )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowerCamelCase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='longest' , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='longest' , truncation=A_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='max_length' , truncation=A_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase__ = 12
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , truncation=A_ , )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , )
lowerCamelCase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCamelCase__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCamelCase__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
def _UpperCamelCase ( self : int ):
self._check_padding(numpify=A_ )
def _UpperCamelCase ( self : Optional[int] ):
self._check_padding(numpify=A_ )
def _UpperCamelCase ( self : List[Any] ):
self._check_truncation(numpify=A_ )
def _UpperCamelCase ( self : int ):
self._check_truncation(numpify=A_ )
@require_torch
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.feat_extract_dict
lowerCamelCase__ = True
lowerCamelCase__ = self.feature_extraction_class(**A_ )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = [len(A_ ) for x in speech_inputs]
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , A_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , A_ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.feat_extract_dict
lowerCamelCase__ = True
lowerCamelCase__ = self.feature_extraction_class(**A_ )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = [len(A_ ) for x in speech_inputs]
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = min(A_ )
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='np' )
self.assertIn('attention_mask' , A_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
"""simple docstring"""
from __future__ import annotations
# Candidate moves as (row, col) deltas; comments follow the original's
# [y, x] coordinate convention (see the demo block below).
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
# Backward-compatible alias: the mangled original bound this table only to
# `_snake_case` while the search routine referenced `DIRECTIONS` (NameError).
_snake_case = DIRECTIONS
def snake_case(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Best-first (A*-style) search on a 2D grid.

    Args:
        grid: 2D grid where 0 is a free cell and 1 is an obstacle.
        init: [row, col] start cell.
        goal: [row, col] destination cell.
        cost: uniform cost of a single move.
        heuristic: per-cell estimated distance to the goal.

    Returns:
        (path, action): the list of [row, col] cells from `init` to `goal`,
        and the grid of move indices used to reach each expanded cell.

    Raises:
        ValueError: if the open list empties before the goal is reached.

    Fixes vs. the mangled original: all five parameters were named ``_a``
    (a SyntaxError: duplicate argument) and the body referenced an undefined
    module global ``DIRECTIONS``; parameter names are restored and the move
    table is bound locally so the function is self-contained.
    """
    # Candidate moves as (row, col) deltas: left, down, right, up.
    directions = [[-1, 0], [0, -1], [1, 0], [0, 1]]

    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid: 1 marks cells already expanded
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(directions)):  # to try out different valid actions
                    xa = x + directions[i][0]
                    ya = y + directions[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    # Walk backwards from the goal using the recorded actions.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - directions[action[x][y]][0]
        ya = y - directions[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    # Demo run on a small maze.  Fixes vs. the mangled original: every name
    # (grid, init, goal, cost, heuristic, path, action) had been collapsed to
    # `_snake_case`, the search was invoked under the undefined name `search`,
    # and a dataset artifact line trailed the block.
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = snake_case(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names the tokenizer saves/loads its vocabulary under.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

# Maximum sequence lengths the positional embeddings support.
# Fix vs. the mangled original: all four module constants above were assigned
# to the single name `lowercase__`, leaving `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and this dict — all referenced by the tokenizer
# class below — undefined.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values (0-255) to printable unicode strings.

    The reversible byte-level BPE vocabulary works on unicode strings, so every
    possible byte needs a printable representation: bytes that are already
    printable map to themselves, the rest are shifted to codepoints >= 256.

    NOTE(review): the mangled original assigned every intermediate to one name
    and then read undefined ``bs``/``cs``/``n``/``__snake_case``; it is restored
    here under the name the tokenizer below actually calls.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            # Unprintable byte: assign it the next codepoint above the byte range.
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (variable-length strings).

    NOTE(review): the mangled original took a parameter it never used and read
    an undefined ``word``; it is restored here under the name the tokenizer's
    ``bpe`` method actually calls.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowercase_(PreTrainedTokenizer):
    """Byte-level BPE tokenizer with a GPT-2 style vocabulary (BART variant).

    Requires a ``vocab.json`` (token -> id) and a ``merges.txt`` (BPE merge
    ranks).  Treats spaces as parts of tokens, so a word is encoded differently
    at the start of a sentence than after a space.

    NOTE(review): the original block was machine-mangled beyond execution — the
    base class ``__lowercase`` was undefined, every method was named
    ``SCREAMING_SNAKE_CASE_``, ``__init__`` declared eleven parameters all
    named ``__SCREAMING_SNAKE_CASE`` (a SyntaxError), and bodies read an
    undefined ``_A``.  It is restored here to the canonical byte-level BPE
    implementation with the method names the ``PreTrainedTokenizer`` machinery
    requires.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned BPE merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` with the GPT-2 regex, byte-encode, and apply BPE."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and undo the byte-to-unicode mapping."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into `save_directory`.

        Returns the two file paths, or None if the directory is invalid.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BART special tokens: <s> X </s> or <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BART does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word tokenizes like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow, GPU-only integration test for the ShapE text-to-3D pipeline.

    NOTE(review): identifiers in this file were machine-mangled — both test
    classes share the name ``lowerCAmelCase_`` (this definition shadows the
    previous one), every method is named ``UpperCamelCase_``, and each
    assignment targets ``_UpperCamelCase`` while later lines read names
    (``pipe``, ``images``, ``_A``) that are never bound.  Code left
    byte-identical; restore from the upstream diffusers test file before
    running.
    """

    def UpperCamelCase_ ( self : str ):
        # NOTE(review): named like a test but written as a tearDown —
        # presumably mangled from ``tearDown``; confirm against upstream.
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self : List[str] ):
        # Compares a full 'a shark' generation against a stored reference
        # rendering downloaded from the Hub.
        _UpperCamelCase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''' )
        _UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
        _UpperCamelCase = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        _UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
        _UpperCamelCase = pipe(
            '''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(_A , _A )
# | 10 | 0 |  (dataset-scrape row-separator artifact; not Python code)
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase(unittest.TestCase):
    """Integration test for the Flax mT5 conditional-generation model."""

    @slow
    def test_small_integration_test(self):
        """Score the target "Hi I am" given the source "Hello there" with
        google/mt5-small and check the summed token log-likelihood against
        a fixed reference value."""
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(
            labels, model.config.pad_token_id, model.config.decoder_start_token_id
        )

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits

        # Mean cross-entropy over target tokens; negate and scale back to a
        # summed log-likelihood so it is comparable to the reference score.
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)


# Start of the next concatenated file (YOLOS image-processor tests); `json`
# is used by its annotation-loading tests below.
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build a YolosImageProcessor in the tests
    below and computes the output sizes the processor is expected to produce."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor will resize `image_inputs` to.

        Non-batched: scale the first image so its shorter side equals
        size["shortest_edge"], preserving aspect ratio. Batched: the padded
        output size, i.e. the per-image maxima of height and width.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                # PIL reports (width, height).
                w, h = image.size
            else:
                # Array inputs are channels-first: (C, H, W).
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit and integration tests for YolosImageProcessor."""

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        # Overriding size/max_size via kwargs should update the size dict,
        # and pad_and_return_pixel_mask=False should disable padding.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(
            self.image_processor_tester, equal_resolution=False, numpify=True
        )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(
            self.image_processor_tester, equal_resolution=False, torchify=True
        )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        # Second processor does padding only, so its __call__ should match pad().
        image_processing_2 = self.image_processing_class(
            do_resize=False, do_normalize=False, do_rescale=False
        )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(
            self.image_processor_tester, equal_resolution=False, torchify=True
        )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(
                encoded_images_with_method["pixel_values"],
                encoded_images["pixel_values"],
                atol=1e-4,
            )
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(
            torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)
        )

        # verify area
        expected_area = torch.tensor(
            [5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]
        )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(
            torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)
        )
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(
            torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)
        )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(
            images=image, annotations=target, masks_path=masks_path, return_tensors="pt"
        )

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(
            torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)
        )

        # verify area
        expected_area = torch.tensor(
            [147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]
        )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(
            torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)
        )
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(
            torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)
        )
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
# The following three lines are dataset-viewer UI text captured by the scrape,
# not part of the source file:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.