| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
"""
The Koch snowflake is a fractal curve built on an equilateral triangle: each
iteration replaces every line segment with four segments, adding an
outward-pointing bump of one third of the segment's length.
"""
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch construction 'steps' times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment with four segments forming the Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the polyline described by 'vectors' with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
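
As a quick sanity check (a standalone sketch, assuming the functions above are in scope): every iteration replaces each segment with four, so the initial 3 segments grow to 3 * 4**n after n steps.

# Segment-count check for the Koch construction: k segments become 4k per step,
# so a polyline of p points (p - 1 segments) grows to 4(p - 1) + 1 points.
vectors = INITIAL_VECTORS
for step in range(1, 6):
    vectors = iteration_step(vectors)
    assert len(vectors) - 1 == 3 * 4**step
print(len(vectors))  # 3 * 4**5 + 1 == 3073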

---

from __future__ import annotations

from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if 'number' is a prime number, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are not primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be of type bool"

    return status


def sieve_er(n: int) -> list[int]:
    """Sieve of Eratosthenes: return all prime numbers from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filter the actual prime numbers (composites were zeroed out).
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans


def get_prime_numbers(n: int) -> list[int]:
    """Return all prime numbers from 2 up to n, using trial division."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to n+1
    # if a number is prime then appends it to the list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans


def prime_factorization(number: int) -> list[int]:
    """Return the prime factorization of 'number' as an ascending list."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number in (0, 1):
        ans.append(number)

    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # integer division keeps 'quotient' exact
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the greatest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"

    return ans


def is_even(number: int) -> bool:
    """Return True if 'number' is even."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be of type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if 'number' is odd."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be of type bool"

    return number % 2 != 0


def goldbach(number: int) -> list[int]:
    """Goldbach's assumption: return two primes whose sum is the even 'number'."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variables for the while-loops.
    i = 0
    j = None

    # exit variable, for breaking out of the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: return the greatest common divisor of two ints >= 0."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be of type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (German: kgV) of two ints >= 1."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV(x, 1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorizations of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captures numbers handled in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be of type int and positive"

    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans is not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and of type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    """Return all primes strictly between the two prime arguments."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans


def get_divisors(n: int) -> list[int]:
    """Return all divisors of n (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Return True if 'number' equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    """Return the fraction numerator/denominator reduced to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
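
A few quick checks (a standalone sketch, assuming the functions above are in scope; the expected values follow directly from the definitions):

assert is_prime(97) and not is_prime(1)
assert sieve_er(20) == get_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]
assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
assert greatest_prime_factor(360) == 5 and smallest_prime_factor(360) == 2
assert goldbach(28) == [5, 23]
assert gcd(12, 18) == 6 and kg_v(12, 18) == 36
assert simplify_fraction(10, 20) == (1, 2)
assert is_perfect_number(28)
assert [fib(i) for i in range(7)] == [1, 1, 2, 3, 5, 8, 13]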

---

"""simple docstring"""
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> Tuple:
A = 0
A = 0
A = {}
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> int:
if vertex not in self.adjacency:
A = {}
self.num_vertices += 1
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[Any]:
self.add_vertex(lowerCamelCase_ )
self.add_vertex(lowerCamelCase_ )
if head == tail:
return
A = weight
A = weight
def UpperCamelCase__ ( self ) -> int:
A = self.get_edges()
for edge in edges:
A , A , A = edge
edges.remove((tail, head, weight) )
for i in range(len(lowerCamelCase_ ) ):
A = list(edges[i] )
edges.sort(key=lambda lowerCamelCase_ : e[2] )
for i in range(len(lowerCamelCase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
A = edges[i][2] + 1
for edge in edges:
A , A , A = edge
A = weight
A = weight
def __str__( self ) -> List[str]:
A = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
A = self.adjacency[head][tail]
string += f'{head} -> {tail} == {weight}\n'
return string.rstrip("""\n""" )
def UpperCamelCase__ ( self ) -> Any:
A = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def UpperCamelCase__ ( self ) -> str:
return self.adjacency.keys()
@staticmethod
def UpperCamelCase__ ( lowerCamelCase_=None ,lowerCamelCase_=None ) -> Optional[int]:
A = Graph()
if vertices is None:
A = []
if edges is None:
A = []
for vertex in vertices:
g.add_vertex(lowerCamelCase_ )
for edge in edges:
g.add_edge(*lowerCamelCase_ )
return g
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> List[str]:
A = {}
A = {}
def __len__( self ) -> int:
return len(self.parent )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Union[str, Any]:
if item in self.parent:
return self.find(lowerCamelCase_ )
A = item
A = 0
return item
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> str:
if item not in self.parent:
return self.make_set(lowerCamelCase_ )
if item != self.parent[item]:
A = self.find(self.parent[item] )
return self.parent[item]
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[str]:
A = self.find(lowerCamelCase_ )
A = self.find(lowerCamelCase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
A = roota
return roota
if self.rank[roota] < self.rank[roota]:
A = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
A = roota
return roota
return None
@staticmethod
def UpperCamelCase__ ( lowerCamelCase_ ) -> Dict:
A = graph.num_vertices
A = Graph.UnionFind()
A = []
while num_components > 1:
A = {}
for vertex in graph.get_vertices():
A = -1
A = graph.get_edges()
for edge in edges:
A , A , A = edge
edges.remove((tail, head, weight) )
for edge in edges:
A , A , A = edge
A = union_find.find(lowerCamelCase_ )
A = union_find.find(lowerCamelCase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
A , A , A = cheap_edge[vertex]
if union_find.find(lowerCamelCase_ ) != union_find.find(lowerCamelCase_ ):
union_find.union(lowerCamelCase_ ,lowerCamelCase_ )
mst_edges.append(cheap_edge[vertex] )
A = num_components - 1
A = Graph.build(edges=lowerCamelCase_ )
return mst
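
A small usage sketch (the example graph is mine, not from the original file):

# Square with one diagonal; distinct weights so the MST is unique.
g = Graph.build(
    vertices=[0, 1, 2, 3],
    edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4), (0, 2, 5)],
)
mst = Graph.boruvka_mst(g)
print(mst)  # keeps the three cheapest connecting edges: 0-1 (1), 1-2 (2), 2-3 (3)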

---

"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCAmelCase =logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase =256
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = ['''melgan''']
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,) -> None:
super().__init__()
# From MELGAN
A = math.log(1E-5 ) # Matches MelGAN training.
A = 4.0 # Largest value for most examples
A = 1_2_8
self.register_modules(
notes_encoder=lowerCamelCase_ ,continuous_encoder=lowerCamelCase_ ,decoder=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,melgan=lowerCamelCase_ ,)
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=(-1.0, 1.0) ,lowerCamelCase_=False ) -> str:
A , A = output_range
if clip:
A = torch.clip(lowerCamelCase_ ,self.min_value ,self.max_value )
# Scale to [0, 1].
A = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=(-1.0, 1.0) ,lowerCamelCase_=False ) -> Optional[Any]:
A , A = input_range
A = torch.clip(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) if clip else outputs
# Scale to [0, 1].
A = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Dict:
A = input_tokens > 0
A , A = self.notes_encoder(
encoder_input_tokens=lowerCamelCase_ ,encoder_inputs_mask=lowerCamelCase_ )
A , A = self.continuous_encoder(
encoder_inputs=lowerCamelCase_ ,encoder_inputs_mask=lowerCamelCase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[str]:
A = noise_time
if not torch.is_tensor(lowerCamelCase_ ):
A = torch.tensor([timesteps] ,dtype=torch.long ,device=input_tokens.device )
elif torch.is_tensor(lowerCamelCase_ ) and len(timesteps.shape ) == 0:
A = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A = timesteps * torch.ones(input_tokens.shape[0] ,dtype=timesteps.dtype ,device=timesteps.device )
A = self.decoder(
encodings_and_masks=lowerCamelCase_ ,decoder_input_tokens=lowerCamelCase_ ,decoder_noise_time=lowerCamelCase_ )
return logits
@torch.no_grad()
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = 1_0_0 ,lowerCamelCase_ = True ,lowerCamelCase_ = "numpy" ,lowerCamelCase_ = None ,lowerCamelCase_ = 1 ,) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(lowerCamelCase_ )}.' )
A = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] ,dtype=np.floataa )
A = np.zeros([1, 0, self.n_dims] ,np.floataa )
A = torch.ones((1, TARGET_FEATURE_LENGTH) ,dtype=lowerCamelCase_ ,device=self.device )
for i, encoder_input_tokens in enumerate(lowerCamelCase_ ):
if i == 0:
A = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device ,dtype=self.decoder.dtype )
# The first chunk has no previous context.
A = torch.zeros((1, TARGET_FEATURE_LENGTH) ,dtype=lowerCamelCase_ ,device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
A = ones
A = self.scale_features(
lowerCamelCase_ ,output_range=[-1.0, 1.0] ,clip=lowerCamelCase_ )
A = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) ,continuous_inputs=lowerCamelCase_ ,continuous_mask=lowerCamelCase_ ,)
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
A = randn_tensor(
shape=encoder_continuous_inputs.shape ,generator=lowerCamelCase_ ,device=self.device ,dtype=self.decoder.dtype ,)
# set step values
self.scheduler.set_timesteps(lowerCamelCase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
A = self.decode(
encodings_and_masks=lowerCamelCase_ ,input_tokens=lowerCamelCase_ ,noise_time=t / self.scheduler.config.num_train_timesteps ,)
# Compute previous output: x_t -> x_t-1
A = self.scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ).prev_sample
A = self.scale_to_features(lowerCamelCase_ ,input_range=[-1.0, 1.0] )
A = mel[:1]
A = mel.cpu().float().numpy()
A = np.concatenate([full_pred_mel, pred_mel[:1]] ,axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase_ ,lowerCamelCase_ )
logger.info("""Generated segment""" ,lowerCamelCase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
A = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
A = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
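
The two scaling helpers are exact affine inverses on the clipped range, which the denoising loop relies on; a standalone round-trip check using the constants set in `__init__`:

import math

import torch

min_value, max_value = math.log(1e-5), 4.0  # MelGAN-matched feature range

features = torch.linspace(min_value, max_value, steps=5)
zero_one = (features - min_value) / (max_value - min_value)
scaled = zero_one * 2.0 - 1.0  # scale_features(..., output_range=(-1.0, 1.0))
restored = (scaled + 1.0) / 2.0 * (max_value - min_value) + min_value  # scale_to_features
assert torch.allclose(features, restored)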

---

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
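
A usage sketch (standard PretrainedConfig behaviour; the values match the defaults above):

config = XLMRobertaConfig()
print(config.model_type, config.vocab_size)  # xlm-roberta 30522

# Override defaults for a smaller model.
small = XLMRobertaConfig(num_hidden_layers=6, hidden_size=384)
print(small.num_hidden_layers, small.hidden_size)  # 6 384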

---

import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return Tensor2Tensor-style sinusoidal timestep embeddings."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
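
A quick shape check for the embedding helper (a sketch; requires JAX):

import jax.numpy as jnp

timesteps = jnp.array([0, 10, 100, 1000])
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
print(emb.shape)  # (4, 32): first 16 columns are sines, last 16 cosines (by default)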

---

import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
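
How the trainer is typically wired up (a hedged sketch in comments; the model, datasets, and `post_process_fn` are placeholders, not from the original file):

# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,            # raw examples consumed by post-processing
#     post_process_function=post_process_fn,  # maps generated ids to answer strings
#     compute_metrics=compute_metrics,
#     tokenizer=tokenizer,
# )
# metrics = trainer.evaluate(max_length=64, num_beams=4)  # forwarded as gen_kwargs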

---

import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no Transformers counterpart."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build an untied linear head that shares weights with an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    """Convert a fairseq M2M-100 checkpoint to a Transformers model."""
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
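
Typical invocation (a sketch in comments; the script file name and the paths are illustrative, not from the original file):

#   python convert_m2m100_checkpoint.py /path/to/model.pt ./m2m100-converted
# The dump folder can then be reloaded with:
#   model = M2M100ForConditionalGeneration.from_pretrained("./m2m100-converted")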

---

import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load


try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Load the Visual Genome object and attribute label vocabularies."""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    """Load a pickled checkpoint and convert its numpy arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if i == last_level:  # compare the index, not the key string
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """Compare a tensor against the reference dumped to 'dump.pt'."""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream a URL into 'temp_file', optionally resuming a partial download."""
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """Resolve a URL to a local cached file, downloading it if necessary."""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    """Derive a deterministic cache filename from a URL and optional ETag."""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """Resolve a URL or local path to a local file, optionally extracting archives."""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def UpperCamelCase ( lowercase_ , lowercase_="," ) -> str:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
lowercase__ : List[Any] = eval(f.read() )
else:
lowercase__ : Union[str, Any] = requests.get(lowercase_ )
try:
lowercase__ : List[str] = requests.json()
except Exception:
lowercase__ : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
lowercase__ : Optional[Any] = eval(lowercase_ )
except Exception:
lowercase__ : Optional[int] = data.split("""\n""" )
req.close()
return data
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : str = requests.get(lowercase_ )
lowercase__ : List[Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def UpperCamelCase ( lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : Union[str, Any] = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ , """rb""" ) as stream:
lowercase__ : Union[str, Any] = pkl.load(lowercase_ )
lowercase__ : Tuple = weights.pop("""model""" )
lowercase__ : int = {}
for k, v in model.items():
lowercase__ : int = torch.from_numpy(lowercase_ )
if "running_var" in k:
lowercase__ : Optional[Any] = torch.tensor([0] )
lowercase__ : Optional[int] = k.replace("""running_var""" , """num_batches_tracked""" )
lowercase__ : List[Any] = zero
return new
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
print(F'{os.path.abspath(os.path.join(lowercase_ , os.pardir ) )}/demo.ipynb' )
def UpperCamelCase ( lowercase_ , lowercase_="RGB" ) -> Tuple:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
if os.path.isfile(lowercase_ ):
lowercase__ : Optional[Any] = cva.imread(lowercase_ )
else:
lowercase__ : str = get_image_from_url(lowercase_ )
assert img is not None, F'could not connect to: {im}'
lowercase__ : Union[str, Any] = cva.cvtColor(lowercase_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowercase__ : List[Any] = img[:, :, ::-1]
return img
def UpperCamelCase ( lowercase_ , lowercase_=1 ) -> str:
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(lowercase_ ) , lowercase_ ))
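
Of the helpers above, only `chunk` runs without network access or model files; a quick check of its batching behaviour:

images = list(range(10))
print(list(chunk(images, batch=4)))  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]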

---

import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
_UpperCamelCase : Tuple = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
_UpperCamelCase : str = parser.parse_args()
if args.check_lib:
_UpperCamelCase : str = importlib.import_module('transformers')
_UpperCamelCase : str = Path(transformers_module.__file__).parent
else:
_UpperCamelCase : Dict = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
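# Example invocations (script name is illustrative):
#   python check_build.py               # check the freshly built build/lib/transformers
#   python check_build.py --check_lib   # check the installed transformers package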
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_UpperCamelCase : Optional[Any] = Lock()
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__snake_case )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowercase = min(__snake_case , __snake_case )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__snake_case )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowercase = max(__snake_case , __snake_case )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] ):
'''simple docstring'''
lowercase = []
lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowercase = Pipe()
lowercase = Pipe()
process_array_.append(
Process(
target=__snake_case , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowercase = temp_rs
lowercase = temp_rr
for i in range(1 , len(__snake_case ) - 1 ):
lowercase = Pipe()
lowercase = Pipe()
process_array_.append(
Process(
target=__snake_case , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowercase = temp_rs
lowercase = temp_rr
process_array_.append(
Process(
target=__snake_case , args=(
len(__snake_case ) - 1,
arr[len(__snake_case ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__snake_case ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__snake_case ) ):
lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
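# For reference, a minimal single-process odd-even transposition sort with the
# same comparison pattern but no pipes or locks; handy for sanity-checking the
# parallel version on small inputs. Illustrative helper, not part of the
# original module.
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr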
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
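# SPARK_PARTITION_ID() is a built-in Spark SQL function; filtering on it lets
# the helper read rows back partition by partition in a chosen order, which is
# what the shuffle/shard tests below compare against.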
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
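# Example invocation (paths are illustrative):
#   python minify.py source_docs/ mini_docs/ 5
# writes the first 5 lines of every file in source_docs/ into mini_docs/.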
if __name__ == "__main__":
fire.Fire(minify)
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from s to t in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """Saturate the graph with max flow, then collect the edges of the minimum cut."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
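# Note: choosing augmenting paths with BFS makes this the Edmonds-Karp variant
# of Ford-Fulkerson (O(V * E^2)); once no augmenting path remains, `res` holds
# the saturated edges that form a minimum cut (max-flow min-cut theorem).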
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve F = (ℏ c π² A) / (240 d⁴) for whichever argument is passed as 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
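# Example (value computed from the formula above, rounded for illustration):
#   casimir_force(force=0, area=4, distance=0.03) -> {'force': ~6.4248e-21}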
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
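# `_LazyModule` defers the heavy torch/TF imports until an attribute is first
# accessed, so importing this package stays cheap; the `_import_structure`
# dict above tells it where each public name lives.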
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
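# Example: gnome_sort([0, 5, 3, 2, 2]) returns [0, 2, 2, 3, 5]. Like insertion
# sort, the worst-case runtime is O(n^2).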
if __name__ == "__main__":
__lowercase : Any = input('''Enter numbers separated by a comma:\n''').strip()
__lowercase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
import unittest

import numpy as np

from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_torchaudio,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
from copy import deepcopy


class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting prefix sums and point updates."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
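# A small self-checking usage sketch (illustrative values, not part of the
# original module): prefix/query/add should stay consistent with a plain list.
def _demo_fenwick() -> None:
    tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3
    tree.add(0, 10)  # underlying array becomes [11, 2, 3, 4, 5]
    assert tree.query(0, 2) == 11 + 2
    assert tree.get_array() == [11, 2, 3, 4, 5]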
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
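# Usage sketch (dataset column and label names are illustrative):
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification(image_column="image", label_column="labels")
#   task = task.align_with_features(features)  # copies the real ClassLabel into label_schema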
def remove_digit(num: int) -> int:
    """
    Return the biggest number that can be achieved by removing exactly one
    digit from the given integer.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    >>> remove_digit(-11)
    1
    >>> remove_digit(2222222)
    222222
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for char in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__("doctest").testmod()
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized seq2seq model built from a pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Heun's second-order sampler for diffusion models (k-diffusion style)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        """Construct the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(self, model_output, timestep, sample, return_dict: bool = True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
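# Usage sketch (illustrative; in practice a diffusion pipeline drives the
# scheduler, and `model` is a user-provided UNet):
#   scheduler = HeunDiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25)
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample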
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __UpperCamelCase (unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=False , ) -> Optional[Any]:
'''simple docstring'''
lowercase = size if size is not None else {"""height""": 20, """width""": 20}
lowercase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = size
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_normalize
lowercase = image_mean
lowercase = image_std
lowercase = do_reduce_labels
def _a ( self ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def SCREAMING_SNAKE_CASE ( ):
lowercase = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
lowercase = Image.open(dataset[0]["""file"""] )
lowercase = Image.open(dataset[1]["""file"""] )
return image, map
def SCREAMING_SNAKE_CASE ( ):
lowercase = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
lowercase = Image.open(ds[0]["""file"""] )
lowercase = Image.open(ds[1]["""file"""] )
lowercase = Image.open(ds[2]["""file"""] )
lowercase = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
__A = BeitImageProcessor if is_vision_available() else None
def _a ( self ) -> int:
'''simple docstring'''
lowercase = BeitImageProcessingTester(self )
@property
def _a ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
lowercase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCAmelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
pass
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # background label gets mapped to the ignore index 255 when labels are reduced
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
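# Illustrative aside (not part of the test file above): "reduce labels" remaps the
# ADE20k background class 0 to the ignore index 255 and shifts every real class down
# by one, which is why the assertions above allow a maximum of 255. A minimal numpy
# sketch of that remapping:
import numpy as np

label = np.array([[0, 1], [2, 150]])  # 0 = background
reduced = label - 1                   # shift real classes down by one
reduced[label == 0] = 255             # background becomes the ignore index
print(reduced)                        # [[255   0] [  1 149]]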
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list) -> int:
    """Plain recursion (exponential): explore the right, diagonal and down neighbours."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_memo(rows: int, cols: int, mat: list) -> int:
    """Top-down recursion with memoisation over the (row, col) subproblems."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list) -> int:
    """Bottom-up tabulation: dp_array[row][col] is the side of the largest square anchored there."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list) -> int:
    """Bottom-up tabulation keeping only the current row and the row below it."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # start a fresh row instead of aliasing, so next_row keeps the finished row
        next_row, current_row = current_row, [0] * (cols + 1)
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
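# Quick sanity check (illustrative; `sample` is made up): all four implementations
# above should agree on the same matrix.
sample = [
    [1, 1, 0],
    [1, 1, 1],
    [0, 1, 1],
]
assert largest_square_area_in_matrix_top_down(3, 3, sample) == 2
assert largest_square_area_in_matrix_top_down_with_memo(3, 3, sample) == 2
assert largest_square_area_in_matrix_bottom_up(3, 3, sample) == 2
assert largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, sample) == 2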
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols for the four-vector components
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Velocity as a fraction of the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta(v)**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the boost to an event four-vector; defaults to the symbolic vector."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
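# Illustrative numeric check: at v = 0.9c the Lorentz factor is 1/sqrt(1 - 0.81),
# about 2.294, i.e. moving clocks tick roughly 2.3 times slower.
v = 0.9 * c
print(beta(v))   # 0.9
print(gamma(v))  # ~2.2941573387056174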
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    """Two-colour the graph with DFS; it is bipartite iff no edge joins equal colours."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
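# Illustrative counter-example (not in the original file): an odd cycle such as a
# triangle cannot be two-coloured, so the check returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False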
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first nth_term terms of the p-series 1 + 1/2^p + 1/3^p + ... as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
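# Illustrative usage with made-up values: the first four terms for p = 2.
print(p_series(4, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16']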
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """
    Extracts Mel-frequency spectral coefficient (MFSC) features from raw speech.
    Note: the class and attribute names here were restored from an obfuscated source;
    "MCTCT" is an assumption based on the deprecated-model imports above.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract MFSC features from a single waveform; batching is done in __call__."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # mean/variance normalisation over the unpadded portion of the sequence
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
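# Illustrative usage sketch (kept commented out; the class name restored above is an
# assumption and the waveform is random noise):
#
#   feature_extractor = MCTCTFeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)  # 1 s at 16 kHz
#   features = feature_extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
#   print(features["input_features"].shape)  # roughly (1, 98, 80): ~98 frames x 80 mel bins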
from torch import nn
class ClassificationHead(nn.Module):
    """A simple linear classification head (the commented-out lines show an MLP variant)."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
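# Illustrative usage (shapes made up): score a batch of 4 pooled embeddings of size
# 768 against 5 classes.
import torch

head = ClassificationHead(class_size=5, embed_size=768)
scores = head(torch.randn(4, 768))
print(scores.shape)  # torch.Size([4, 5])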
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
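# Quick check (illustrative): 10! = 3628800, whose digits sum to 27.
assert solution(10) == 27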
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of a vector."""
    return np.dot(vector, vector)


class SVC:
    """Support vector classifier trained by maximising Wolfe's dual problem."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
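# Illustrative fit/predict on a tiny made-up dataset (two separable clusters); the
# linear kernel should classify new points by which side of the origin they fall on.
observations = [np.array([1.0, 1.0]), np.array([1.5, 1.2]), np.array([-1.0, -1.0]), np.array([-1.2, -1.4])]
classes = np.array([1, 1, -1, -1])
svc = SVC(kernel="linear")
svc.fit(observations, classes)
print(svc.predict(np.array([2.0, 2.0])))    # expected: 1
print(svc.predict(np.array([-2.0, -2.0])))  # expected: -1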
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
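# Illustrative aside: _LazyModule defers the heavy torch imports until a name is
# first accessed. A minimal standalone sketch of the idea (PEP 562 module-level
# __getattr__; this is not the transformers implementation), kept commented out so
# it does not interfere with the sys.modules assignment above:
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               module = importlib.import_module("." + submodule, __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")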
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'
IMPORT_IN_FUNCTION = '\ndef foo():\n    import os\n    return False\n'
DEEPLY_NESTED_IMPORT = '\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n'
TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n'
TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n'
MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n'
EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n'
GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n'
MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n'
MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n'

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
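# Illustrative sketch of the idea behind get_imports (not the transformers
# implementation, which additionally drops imports guarded by try/except): collect
# top-level module names with the ast module.
import ast


def naive_get_imports(filename):
    with open(filename) as f:
        tree = ast.parse(f.read())
    modules = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            modules.update(alias.name.split(".")[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module and node.level == 0:
            modules.add(node.module.split(".")[0])
    return sorted(modules)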
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__A = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
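# Illustrative usage of the defaults restored above: head_dim and rotary are derived
# properties, not stored fields.
config = FalconConfig()
print(config.head_dim)  # 4544 // 71 == 64
print(config.rotary)    # True, because alibi defaults to False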
from manim import *
class CheckpointLoadingScene(Scene):  # descriptive name chosen here; the original class name was lost
    def construct(self):
lowerCamelCase_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ : int = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCamelCase_ : Any = [mem.copy() for i in range(6 )]
lowerCamelCase_ : List[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowerCamelCase_ : Any = VGroup(*a_ ).arrange(a_ , buff=0 )
lowerCamelCase_ : int = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
lowerCamelCase_ : List[Any] = Text("CPU" , font_size=24 )
lowerCamelCase_ : Dict = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a_ )
lowerCamelCase_ : str = [mem.copy() for i in range(4 )]
lowerCamelCase_ : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowerCamelCase_ : List[Any] = Text("GPU" , font_size=24 )
lowerCamelCase_ : int = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
gpu.move_to([-1, -1, 0] )
self.add(a_ )
lowerCamelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCamelCase_ : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowerCamelCase_ : Tuple = Text("Model" , font_size=24 )
lowerCamelCase_ : List[Any] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
model.move_to([3, -1.0, 0] )
self.add(a_ )
lowerCamelCase_ : Tuple = []
lowerCamelCase_ : Any = []
lowerCamelCase_ : int = []
for i, rect in enumerate(a_ ):
rect.set_stroke(a_ )
lowerCamelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a_ , buff=0.0 )
self.add(a_ )
model_cpu_arr.append(a_ )
self.add(*a_ , *a_ , *a_ )
lowerCamelCase_ : Tuple = [mem.copy() for i in range(6 )]
lowerCamelCase_ : Dict = VGroup(*a_ ).arrange(a_ , buff=0 )
lowerCamelCase_ : int = Text("Loaded Checkpoint" , font_size=24 )
lowerCamelCase_ : Optional[int] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(a_ )
lowerCamelCase_ : Union[str, Any] = []
lowerCamelCase_ : int = []
for i, rect in enumerate(a_ ):
lowerCamelCase_ : Union[str, Any] = fill.copy().set_fill(a_ , opacity=0.7 )
target.move_to(a_ )
ckpt_arr.append(a_ )
lowerCamelCase_ : Optional[int] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a_ )
self.add(*a_ , *a_ )
lowerCamelCase_ : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ : str = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a_ , a_ )
lowerCamelCase_ : str = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(a_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a_ )
lowerCamelCase_ : List[Any] = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowerCamelCase_ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ : str = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ : List[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowerCamelCase_ : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowerCamelCase_ : List[Any] = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
lowerCamelCase_ : Optional[int] = Text("Disk" , font_size=24 )
lowerCamelCase_ : str = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(a_ , run_time=3 ) , Write(a_ , run_time=1 ) , Create(a_ , run_time=1 ) )
lowerCamelCase_ : str = []
for i, rect in enumerate(a_ ):
lowerCamelCase_ : Tuple = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a_ , run_time=1.5 ) )
self.play(*a_ )
self.play(FadeOut(a_ ) )
lowerCamelCase_ : Any = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ , run_time=3 ) )
self.play(
FadeOut(a_ , a_ , *a_ , *a_ ) , )
self.wait()
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
def _UpperCamelCase ( self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCamelCase_ : Dict = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Union[str, Any] = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : List[Any] = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
shutil.rmtree(a_ )
lowerCamelCase_ : List[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : List[Any] = tempfile.mkdtemp()
lowerCamelCase_ : Tuple = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Dict = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ : List[str] = chr(0Xe007 )
additional_special_tokens.append(a_ )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Any = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : Any = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
self.assertIn(a_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ : int = tokenizer.__class__.from_pretrained(a_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ ,lowerCamelCase_ : str = self.get_clean_sequence(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Tuple = 0Xe005
lowerCamelCase_ : Dict = chr(a_ )
tokenizer.add_special_tokens({"cls_token": special_token} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(len(a_ ) , 1 )
lowerCamelCase_ : List[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=a_ )
lowerCamelCase_ : List[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(a_ , input_encoded + special_token_id )
lowerCamelCase_ : Optional[int] = tokenizer.decode(a_ , skip_special_tokens=a_ )
self.assertTrue(special_token not in decoded )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Optional[int] = chr(0Xe005 )
lowerCamelCase_ : str = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=a_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
lowerCamelCase_ : Tuple = tokenizer.tokenize(a_ )
lowerCamelCase_ : List[Any] = tokenizer.tokenize(a_ )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(token_a[0] , a_ )
self.assertEqual(token_a[0] , a_ )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
lowerCamelCase_ : List[str] = 0Xe006
lowerCamelCase_ : Any = chr(a_ )
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(a_ )
tokenizer.from_pretrained(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : List[Any] = json.load(a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : int = json.load(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Any = 0Xe006
lowerCamelCase_ : List[Any] = chr(a_ )
lowerCamelCase_ : Any = [new_token_a]
lowerCamelCase_ : Optional[Any] = [new_token_a]
with open(os.path.join(a_ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ : str = tokenizer_class.from_pretrained(a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCamelCase_ : Optional[int] = 0Xe007
lowerCamelCase_ : List[str] = chr(a_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ : int = [AddedToken(a_ , lstrip=a_ )]
lowerCamelCase_ : Dict = tokenizer_class.from_pretrained(
a_ , additional_special_tokens=a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Union[str, Any] = "hello world"
if self.space_between_special_tokens:
lowerCamelCase_ : int = "[CLS] hello world [SEP]"
else:
lowerCamelCase_ : int = input
lowerCamelCase_ : Optional[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.decode(a_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(a_ , [output, output.lower()] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Tuple = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowerCamelCase_ : Optional[int] = "a"
lowerCamelCase_ : Dict = ord(a_ )
for attr in attributes_list:
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [] )
lowerCamelCase_ : Optional[int] = 0Xe006
lowerCamelCase_ : List[str] = chr(a_ )
setattr(a_ , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
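# Illustrative aside on the ids asserted earlier in this file: CANINE tokenizes at
# the Unicode codepoint level, so "L" encodes as ord("L") == 76, and the 57344/57345
# sentinels are the private-use codepoints 0xE000/0xE001 used for [CLS]/[SEP].
print(ord("L"), hex(57344), hex(57345))  # 76 0xe000 0xe001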
def hexagonal_numbers(length: int) -> list:
    """Return the first `length` hexagonal numbers n * (2n - 1) for n = 0..length-1."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
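# Quick check (illustrative): n * (2n - 1) for n = 0..4.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]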
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")

        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1

        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)

        with self.assertRaises(AttributeError):
            _ = act2.a
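# Illustrative reference (assumed equivalent to gelu_python above, not imported from
# transformers): the exact erf-based GELU.
import math


def gelu_reference(x):
    # GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))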
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = GPTNeoXModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=__lowercase , hidden_size=6_4 , num_attention_heads=8 )
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
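# The linear/dynamic branches above exercise two RoPE-extension strategies. A
# minimal standalone sketch of the difference (hypothetical helper, assuming the
# usual rotary-embedding formulas; this is not the transformers implementation):
# linear scaling divides every position index by `factor`, so even short inputs
# change, while dynamic NTK scaling only rewrites the frequency base once the
# input exceeds the trained maximum, which is why the short-input outputs still
# match in the "dynamic" case.
import torch


def rope_angles(seq_len, dim, scaling_type, factor, base=10000.0, max_trained=2048):
    positions = torch.arange(seq_len, dtype=torch.float32)
    if scaling_type == "linear":
        positions = positions / factor  # shrinks every position, short or long
    elif scaling_type == "dynamic" and seq_len > max_trained:
        # enlarge the base so the longest position maps back into the trained range
        base = base * ((factor * seq_len / max_trained) - (factor - 1)) ** (dim / (dim - 2))
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    return torch.outer(positions, inv_freq)  # angle matrix fed to sin/cos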
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }

        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
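# A sketch of how a concrete block test would plug into the mixin above
# (hypothetical test class; the DownBlock2D import path varies across diffusers
# versions, and a real test pins nine checkpoint-specific reference floats):
import unittest

from diffusers.models.unet_2d_blocks import DownBlock2D


class DownBlock2DSketchTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D
    block_type = "down"  # selects the (4, 32, 16, 16) branch of output_shape

    def test_output(self):
        expected_slice = [0.0] * 9  # placeholder, not real reference values
        super().test_output(expected_slice)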
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
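# Worked example of the first-subtoken labeling rule implemented above
# (hypothetical tokenization, for illustration only). With
# label_map = {"O": 0, "B-PER": 1, "B-LOC": 2} and pad_token_label_id = -100:
#
#   words:     ["Mark", "Washington"]
#   labels:    ["B-PER", "B-LOC"]
#   tokens:    ["Mark", "Wash", "##ington"]   # "Washington" splits into two pieces
#   label_ids: [1, 2, -100]                   # only the first piece keeps the real id,
#                                             # so the loss ignores the continuation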
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
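# Example wiring for the wrappers above (hypothetical `MyNerTask` subclass and
# local paths, shown only as a sketch):
#
#     task = MyNerTask()  # implements read_examples_from_file / get_labels
#     labels = task.get_labels("labels.txt")
#     tf_dataset = TFTokenClassificationDataset(
#         task, "data/", tokenizer, labels, model_type="bert", max_seq_length=128
#     ).get_dataset().batch(32)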
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
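# The backbone assertions above reduce to a stage-selection rule. A worked
# example with this tester's defaults (the stem/stage naming is an assumption
# about the config's convention, used only for illustration):
hidden_sizes = [8, 16, 32, 64]
stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]  # assumed ordering
out_features = ["stage2", "stage3", "stage4"]
channels = [hidden_sizes[stage_names.index(stage) - 1] for stage in out_features]
assert channels == [16, 32, 64]  # == hidden_sizes[1:], exactly what the test asserts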
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.
    >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999}
    >>> all(roman_to_int(key) == value for key, value in tests.items())
    True
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.
    >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999}
    >>> all(int_to_roman(value) == key for key, value in tests.items())
    True
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
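# Quick round-trip check of the two converters (runnable with the functions above):
assert roman_to_int("MMMCMXCIX") == 3999
assert int_to_roman(3999) == "MMMCMXCIX"
assert int_to_roman(1994) == "MCMXCIV"  # M + CM(900) + XC(90) + IV(4)
assert roman_to_int(int_to_roman(1994)) == 1994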
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
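# Worked example of the bookkeeping done in __init__ above: with vocab_size=10
# and cutoffs=[3, 7],
#   self.cutoffs        -> [3, 7, 10]
#   self.cutoff_ends    -> [0, 3, 7, 10]   (token-id boundaries per cluster)
#   self.shortlist_size -> 3               (head tokens scored directly)
#   self.n_clusters     -> 2               (two tail clusters)
#   self.head_size      -> 3 + 2 = 5       (head tokens plus one logit per tail cluster)
# so ids 0-2 live in the head, ids 3-6 in tail cluster 0, and ids 7-9 in tail
# cluster 1; rare ids pay the cost of a second softmax inside their cluster.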
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
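# Quick numeric check of the Bernstein basis used above, at t = 0.5 on the
# degree-2 example (illustrative; runs with the class as reconstructed here):
# comb(2, i) * (1 - t)**(2 - i) * t**i gives [0.25, 0.5, 0.25], which sums to 1.
quadratic = BezierCurve([(0, 0), (5, 5), (5, 0)])
assert quadratic.basis_function(0.5) == [0.25, 0.5, 0.25]
assert quadratic.bezier_curve_function(0.5) == (3.75, 2.5)  # 0.5*5 + 0.25*5 = 3.75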
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
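# The mapping built in stretch() follows the histogram-equalization rule
# s_k = round((L - 1) * CDF(k)) (the class rounds slightly differently via
# self.rem). A tiny worked example with L = 4 gray levels and the 2x2 image
# [[0, 0], [1, 3]]: histogram = [2, 1, 0, 1], probabilities = [0.5, 0.25, 0, 0.25],
# running CDF = [0.5, 0.75, 0.75, 1.0], and new levels round(3 * CDF) =
# [2, 2, 2, 3], so gray level 0 brightens to 2 and the contrast spreads out.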
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1E-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
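# Illustrative sketch (added; not part of the original test module): given a config and
# integer token tensors, prepare_led_inputs_dict fills in any masks that were not passed
# explicitly before the tensors reach the model, e.g.
#   inputs = prepare_led_inputs_dict(LEDConfig(), _long_tensor([[0, 5, 2]]), _long_tensor([[0, 5, 2]]))
#   sorted(inputs)  # attention_mask, decoder_* and head masks alongside input_ids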
@require_tf
class __lowercase ( A, A, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFLEDForConditionalGeneration,
            '''feature-extraction''': TFLEDModel,
            '''summarization''': TFLEDForConditionalGeneration,
            '''text2text-generation''': TFLEDForConditionalGeneration,
            '''translation''': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['''global_attention_mask'''] = tf.zeros_like(inputs_dict['''attention_mask'''])
        num_global_attn_indices = 2
        inputs_dict['''global_attention_mask'''] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict['''global_attention_mask'''],)
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''use_cache'''] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def A_ ( self : List[str] ):
pass
def A_ ( self : Dict ):
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    '''simple docstring'''
    return tf.constant(tok_lst, dtype=tf.int32)
lowercase = 1E-4
@slow
@require_tf
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Any ):
UpperCamelCase__ = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
UpperCamelCase__ = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
UpperCamelCase__ = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
UpperCamelCase__ = prepare_led_inputs_dict(model.config , _a , _a )
UpperCamelCase__ = model(**_a )[0]
UpperCamelCase__ = (1, 1_024, 768)
self.assertEqual(output.shape , _a )
# change to expected output here
UpperCamelCase__ = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1E-3 )
def A_ ( self : Optional[Any] ):
UpperCamelCase__ = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
UpperCamelCase__ = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
UpperCamelCase__ = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
UpperCamelCase__ = prepare_led_inputs_dict(model.config , _a , _a )
UpperCamelCase__ = model(**_a )[0]
UpperCamelCase__ = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , _a )
# change to expected output here
UpperCamelCase__ = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1E-3 , rtol=1E-3 )
| 240
| 0
|
'''simple docstring'''
class _UpperCamelCase :
'''simple docstring'''
    def __init__(self, n: int):
        '''simple docstring'''
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__(self) -> int:
        '''simple docstring'''
        return self.size
    def is_empty(self) -> bool:
        '''simple docstring'''
        return self.size == 0
    def first(self):
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]
    def enqueue(self, data):
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue(self):
        '''simple docstring'''
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
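if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; the class name is kept from this snippet):
    queue = _UpperCamelCase(3)
    queue.enqueue('a').enqueue('b')
    assert len(queue) == 2 and queue.first() == 'a'
    assert queue.dequeue() == 'a' and queue.first() == 'b'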
| 454
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 454
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : List[Any] = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class UpperCamelCase_(PretrainedConfig):
    model_type = '''swinv2'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs, ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
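# Illustrative usage sketch (added; the class name is kept from this snippet — upstream
# it is Swinv2Config):
#   config = UpperCamelCase_(image_size=256, window_size=8)
#   config.hidden_size  # 768 == 96 * 2 ** 3 for the default four-stage layout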
| 673
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def _lowerCAmelCase(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    """simple docstring"""
    @staticmethod
    def snake_case_(parser):
        train_parser = parser.add_parser(
            """convert""", help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""",)
        train_parser.add_argument(
            """--tfds_path""", type=str, required=True, help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""",)
        train_parser.add_argument(
            """--datasets_directory""", type=str, required=True, help="""Path to the HuggingFace Datasets folder.""")
        train_parser.set_defaults(func=_lowerCAmelCase)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("""datasets-cli/converting""")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
def snake_case_ ( self):
if os.path.isdir(self._tfds_path):
__SCREAMING_SNAKE_CASE = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
__SCREAMING_SNAKE_CASE = os.path.dirname(self._tfds_path)
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""")
__SCREAMING_SNAKE_CASE = os.path.abspath(self._datasets_directory)
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = {}
if os.path.isdir(self._tfds_path):
__SCREAMING_SNAKE_CASE = os.listdir(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}")
__SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
if not os.path.isfile(lowerCAmelCase__) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""")
continue
with open(lowerCAmelCase__ , encoding="""utf-8""") as f:
__SCREAMING_SNAKE_CASE = f.readlines()
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = []
for line in lines:
__SCREAMING_SNAKE_CASE = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__SCREAMING_SNAKE_CASE = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
__SCREAMING_SNAKE_CASE = """"""
continue
elif "from absl import logging" in out_line:
__SCREAMING_SNAKE_CASE = """from datasets import logging\n"""
elif "getLogger" in out_line:
__SCREAMING_SNAKE_CASE = out_line.replace("""getLogger""" , """get_logger""")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = list(filter(lambda lowerCAmelCase__: e in out_line , lowerCAmelCase__))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase__) + """\n""")
out_lines.append(lowerCAmelCase__)
out_lines.append(lowerCAmelCase__)
continue
else:
for pattern, replacement in TO_CONVERT:
__SCREAMING_SNAKE_CASE = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__SCREAMING_SNAKE_CASE = re.match(R"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , lowerCAmelCase__)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(""","""))
__SCREAMING_SNAKE_CASE = """from . import """ + match.group(1)
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}")
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__SCREAMING_SNAKE_CASE = True
out_lines.append(lowerCAmelCase__)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__SCREAMING_SNAKE_CASE = f_name.replace(""".py""" , """""")
__SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
self._logger.info(f"Adding directory {output_dir}")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase__)
if needs_manual_update:
with_manual_update.append(lowerCAmelCase__)
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""") as f:
f.writelines(lowerCAmelCase__)
self._logger.info(f"Converted in {output_file}")
for utils_file in utils_files:
try:
__SCREAMING_SNAKE_CASE = os.path.basename(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = imports_to_builder_map[f_name.replace(""".py""" , """""")]
self._logger.info(f"Moving {dest_folder} to {utils_file}")
shutil.copy(lowerCAmelCase__ , lowerCAmelCase__)
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 155
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : int = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
"""simple docstring"""
print(f"""Converting {name}...""")
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
_A : Optional[Any] = timm.create_model('''levit_128s''' , pretrained=lowerCAmelCase)
else:
_A : Any = timm.create_model('''levit_128''' , pretrained=lowerCAmelCase)
if hidden_sizes == 192:
_A : Optional[Any] = timm.create_model('''levit_192''' , pretrained=lowerCAmelCase)
if hidden_sizes == 256:
_A : str = timm.create_model('''levit_256''' , pretrained=lowerCAmelCase)
if hidden_sizes == 384:
_A : List[str] = timm.create_model('''levit_384''' , pretrained=lowerCAmelCase)
from_model.eval()
_A : Any = LevitForImageClassificationWithTeacher(lowerCAmelCase).eval()
_A : List[Any] = OrderedDict()
_A : List[Any] = from_model.state_dict()
_A : List[str] = list(from_model.state_dict().keys())
_A : Tuple = list(our_model.state_dict().keys())
print(len(lowerCAmelCase) , len(lowerCAmelCase))
for i in range(len(lowerCAmelCase)):
_A : str = weights[og_keys[i]]
our_model.load_state_dict(lowerCAmelCase)
_A : Optional[Any] = torch.randn((2, 3, 224, 224))
_A : Dict = from_model(lowerCAmelCase)
_A : Union[str, Any] = our_model(lowerCAmelCase).logits
assert torch.allclose(lowerCAmelCase , lowerCAmelCase), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name)
_A : int = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name)
print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """simple docstring"""
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_convnext'''] = ['''ConvNextFeatureExtractor''']
    _import_structure['''image_processing_convnext'''] = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convnext'''] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convnext'''] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 417
| 0
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A = []
A = []
A = []
for rt in rc.restypes:
A = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
A = {name: i for i, name in enumerate(lowerCAmelCase__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
A = torch.tensor(
lowerCAmelCase__ , dtype=torch.intaa , device=protein['aatype'].device , )
A = torch.tensor(
lowerCAmelCase__ , dtype=torch.intaa , device=protein['aatype'].device , )
A = torch.tensor(
lowerCAmelCase__ , dtype=torch.floataa , device=protein['aatype'].device , )
A = protein['aatype'].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
A = restype_atomaa_to_atomaa[protein_aatype]
A = restype_atomaa_mask[protein_aatype]
A = residx_atomaa_mask
A = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
A = restype_atomaa_to_atomaa[protein_aatype]
A = residx_atomaa_to_atomaa.long()
# create the corresponding mask
A = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['aatype'].device )
for restype, restype_letter in enumerate(rc.restypes ):
A = rc.restype_atoa[restype_letter]
A = rc.residue_atoms[restype_name]
for atom_name in atom_names:
A = rc.atom_order[atom_name]
A = 1
A = restype_atomaa_mask[protein_aatype]
A = residx_atomaa_mask
return protein
def make_atomaa_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    '''simple docstring'''
    batch = tree_map(lambda n: torch.tensor(n, device=batch['aatype'].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
return out
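# Illustrative usage sketch (added; mirrors how the helper above is meant to be used in
# OpenFold-style code, where it attaches atom14/atom37 index and mask tensors to `protein`):
#   protein = {"aatype": torch.zeros(8, dtype=torch.long)}  # eight residues of restype 0
#   protein = make_atomaa_masks(protein)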
| 106
|
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    '''simple docstring'''
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    '''simple docstring'''
    return_name = '''generated'''
    def __init__(self, *args, **kwargs) -> Optional[int]:
        super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters(self, return_tensors=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs, ) -> Union[str, Any]:
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""")
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> Any:
        return True
    def _parse_and_tokenize(self, *args, truncation) -> Dict:
        prefix = self.model.config.prefix if self.model.config.prefix is not None else """"""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. It should be either of type `str` or type `list`""")
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs) -> Dict:
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs) -> Tuple:
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs) -> Union[str, Any]:
        if self.framework == "pt":
            in_b, input_length = model_inputs["""input_ids"""].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["""input_ids"""]).numpy()
        generate_kwargs["""min_length"""] = generate_kwargs.get("""min_length""", self.model.config.min_length)
        generate_kwargs["""max_length"""] = generate_kwargs.get("""max_length""", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["""min_length"""], generate_kwargs["""max_length"""])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False) -> Dict:
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    '''simple docstring'''
    return_name = '''summary'''
    def __call__(self, *args, **kwargs) -> Tuple:
        return super().__call__(*args, **kwargs)
    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""")
        if input_length < max_length:
            logger.warning(
                f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                """a summarization task, where outputs shorter than the input are typically wanted, you might """
                f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""")
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    '''simple docstring'''
    return_name = '''translation'''
    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> Optional[int]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
    def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None) -> List[Any]:
        if getattr(self.tokenizer, """_build_translation_inputs""", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs) -> List[Any]:
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["""src_lang"""] = src_lang
        if tgt_lang is not None:
            preprocess_params["""tgt_lang"""] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("""task""", self.task)
            items = task.split("""_""")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["""src_lang"""] = items[1]
                preprocess_params["""tgt_lang"""] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, *args, **kwargs) -> List[Any]:
        return super().__call__(*args, **kwargs)
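# Illustrative usage sketch (added; the task strings and model resolution are standard
# `transformers` behavior, not defined in this file):
#   from transformers import pipeline
#   summarizer = pipeline("summarization")           # -> SummarizationPipeline
#   translator = pipeline("translation_en_to_fr")    # -> TranslationPipeline; src/tgt parsed from the task name
#   summarizer("Long article text ...", max_length=60)
#   translator("How are you?")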
| 465
| 0
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        """simple docstring"""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ):
        """simple docstring"""
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
self.parent.assertEqual(result.state_preds.shape , states.shape)
self.parent.assertEqual(result.action_preds.shape , actions.shape)
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size)) # seq length *3 as there are 3 modelities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class __A(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
UpperCAmelCase__ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
def __snake_case ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
def __snake_case ( self):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
def __snake_case ( self):
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def __snake_case ( self):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def __snake_case ( self):
"""simple docstring"""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''')
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
| 709
|
import math
def jump_search(arr, x):
    """Locate x in sorted arr by jumping sqrt(n)-sized blocks, then scanning linearly."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(F'''Number {x} is at index {res}''')
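# Non-interactive usage sketch (added):
#   jump_search([0, 1, 2, 8, 13, 21, 34], 13)  # -> 4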
| 613
| 0
|
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path: str, openai_config_file: str, pytorch_dump_folder_path: str) -> None:
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'''Save configuration file to {pytorch_config_dump_path}''')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
_UpperCAmelCase = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
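    # Example invocation (illustrative; the script filename is an assumption):
    #   python convert_openai_original_tf_checkpoint_to_pytorch.py \
    #       --openai_checkpoint_folder_path ./openai-gpt-ckpt --pytorch_dump_folder_path ./openai-gpt-pt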
| 699
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    '''simple docstring'''
    def __init__(self, data: T):
        self.data = data
        self.next = None
    def __str__(self) -> str:
        return f'''{self.data}'''
class Stack(Generic[T]):
    '''simple docstring'''
    def __init__(self):
        self.top = None
    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self) -> str:
        return "->".join([str(item) for item in self])
    def __len__(self) -> int:
        return len(tuple(iter(self)))
    def is_empty(self) -> bool:
        return self.top is None
    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data
    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
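    # Minimal usage sketch (added for illustration):
    demo_stack: Stack[int] = Stack()
    demo_stack.push(1)
    demo_stack.push(2)
    assert str(demo_stack) == "2->1"
    assert demo_stack.pop() == 2 and demo_stack.peek() == 1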
| 699
| 1
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    # A matrix is Hermitian when it equals its own conjugate transpose.
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
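    # Added sanity sketch: for Hermitian A, R(A, v) lies between the extreme eigenvalues of A.
    demo_a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    demo_v = np.array([[1], [2], [3]])
    demo_eigs = np.linalg.eigvalsh(demo_a)
    assert demo_eigs[0] <= rayleigh_quotient(demo_a, demo_v) <= demo_eigs[-1]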
| 712
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict) -> None:
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f'{key} -> {new_key}')
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default for `root` is an assumption so the single-argument call
    # in convert_openai_whisper_to_tfms below keeps working.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('''/''')[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f'{download_target} exists and is not a regular file')
    if os.path.isfile(download_target):
        model_bytes = open(download_target, '''rb''').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file')
    with urllib.request.urlopen(url) as source, open(download_target, '''wb''') as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''')), ncols=80, unit='''iB''', unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, '''rb''').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''')
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path) -> None:
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions['''n_mels'''], d_model=dimensions['''n_audio_state'''], max_target_positions=dimensions['''n_text_ctx'''], encoder_layers=dimensions['''n_audio_layer'''], encoder_attention_heads=dimensions['''n_audio_head'''], decoder_layers=dimensions['''n_text_layer'''], decoder_attention_heads=dimensions['''n_text_head'''], max_source_positions=dimensions['''n_audio_ctx'''], )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f' but all the following weights are missing {missing}')
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowercase = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
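    # Example invocation (illustrative; converts the tiny English-only checkpoint):
    #   python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny-en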
| 431
| 0
|
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """simple docstring"""
    def __init__(self, img, dst_width: int, dst_height: int):
        '''simple docstring'''
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )
    def process(self) -> None:
        '''simple docstring'''
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x(self, x: int) -> int:
        '''simple docstring'''
        return int(self.ratio_x * x)
    def get_y(self, y: int) -> int:
        '''simple docstring'''
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
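# The pixel-by-pixel loop above is easy to follow but slow; an equivalent
# vectorized nearest-neighbour resize using numpy fancy indexing (a sketch,
# assuming an H x W x 3 uint8 input array):
def resize_nearest(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]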
| 351
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 351
| 1
|
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
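# Usage sketch: map a batch of [-1, 1] diffusion outputs to PIL images
# (the random tensor below is only an illustrative stand-in):
import torch

pil_batch = pt_to_pil(torch.rand(2, 3, 64, 64) * 2 - 1)
pil_batch[0].save("sample.png")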
| 660
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
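# On Python >= 3.10 the same popcount is built in; a quick cross-check
# against the implementations above:
assert (25).bit_count() == get_set_bits_count_using_brian_kernighans_algorithm(25) == 3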
| 660
| 1
|
"""simple docstring"""
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)
    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"ValueError: there is no {number}th Proth number")
continue
print(f"The {number}th Proth number: {value}")
| 29
|
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
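# The 0.00979 expectation in test_variance above is the DDPM "fixed_small"
# posterior variance beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t); a
# standalone numpy sketch over the test's linear beta schedule:
import numpy as np

betas = np.linspace(0.0001, 0.02, 1000)
alphas_cumprod = np.cumprod(1.0 - betas)
t = 487
variance = betas[t] * (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t])
print(round(float(variance), 5))  # ~0.0098, matching the asserted 0.00979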
| 18
| 0
|
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
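# normalize_box maps pixel coordinates into the 0-1000 space LayoutLM-style
# models expect; a quick usage sketch:
box = [10, 20, 110, 220]  # (left, top, right, bottom) in pixels
print(normalize_box(box, 500, 500))  # [20, 40, 220, 440]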
| 275
|
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 275
| 1
|
import logging
from transformers.configuration_utils import PretrainedConfig
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A configuration replicating BertConfig with additional pruning parameters."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
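# Usage sketch with the default top-K pruning configuration (argument names
# follow the signature restored above; values are the defaults):
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)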
| 519
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["OwlViTFeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
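# The same lazy-import behaviour can be sketched with PEP 562 module-level
# __getattr__ (a simplified stand-in for transformers' _LazyModule):
import importlib

def __getattr__(name):
    for module, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(f".{module}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")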
| 631
| 0
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    """Return True if the graph (given as an adjacency list) is 2-colorable."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
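# Sanity check with an odd cycle (a triangle), which can never be bipartite:
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False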
| 279
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 279
| 1
|
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, outputs):
        with torch.no_grad():
            return self.model.generate_speech(**outputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
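# Hypothetical usage sketch, assuming PipelineTool.__call__ chains the
# encode/forward/decode methods restored above (checkpoints download on
# first use):
tool = TextToSpeechTool()
tool.setup()
waveform = tool("A short test sentence.")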
| 623
|
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
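# The markdown formatter assumes every story dict carries "title" and "url";
# self posts (e.g. Ask HN) lack "url", so a defensive variant is safer:
def story_as_markdown(story: dict) -> str:
    url = story.get("url", f"https://news.ycombinator.com/item?id={story['id']}")
    return f"* [{story['title']}]({url})"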
| 4
| 0
|
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=0.0 , UpperCamelCase__ = None , UpperCamelCase__ = "geglu" , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = "layer_norm" , UpperCamelCase__ = False , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = only_cross_attention
lowerCamelCase_ = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
lowerCamelCase_ = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCamelCase_ = AdaLayerNorm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
lowerCamelCase_ = AdaLayerNormZero(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ = Attention(
query_dim=_SCREAMING_SNAKE_CASE , heads=_SCREAMING_SNAKE_CASE , dim_head=_SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_SCREAMING_SNAKE_CASE , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCamelCase_ = (
AdaLayerNorm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm
else nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
)
lowerCamelCase_ = Attention(
query_dim=_SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_SCREAMING_SNAKE_CASE , dim_head=_SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , upcast_attention=_SCREAMING_SNAKE_CASE , ) # is self-attn if encoder_hidden_states is none
else:
lowerCamelCase_ = None
lowerCamelCase_ = None
# 3. Feed-forward
lowerCamelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ = FeedForward(_SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , activation_fn=_SCREAMING_SNAKE_CASE , final_dropout=_SCREAMING_SNAKE_CASE )
# let chunk size default to None
lowerCamelCase_ = None
lowerCamelCase_ = 0
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = chunk_size
lowerCamelCase_ = dim
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , ) -> str:
'''simple docstring'''
if self.use_ada_layer_norm:
lowerCamelCase_ = self.norma(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.norma(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hidden_dtype=hidden_states.dtype )
else:
lowerCamelCase_ = self.norma(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCamelCase_ = self.attna(
_SCREAMING_SNAKE_CASE , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if self.use_ada_layer_norm_zero:
lowerCamelCase_ = gate_msa.unsqueeze(1 ) * attn_output
lowerCamelCase_ = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCamelCase_ = (
self.norma(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm else self.norma(_SCREAMING_SNAKE_CASE )
)
lowerCamelCase_ = self.attna(
_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ = attn_output + hidden_states
# 3. Feed-forward
lowerCamelCase_ = self.norma(_SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
lowerCamelCase_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
lowerCamelCase_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCamelCase_ = torch.cat(
[self.ff(_SCREAMING_SNAKE_CASE ) for hid_slice in norm_hidden_states.chunk(_SCREAMING_SNAKE_CASE , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCamelCase_ = self.ff(_SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
lowerCamelCase_ = gate_mlp.unsqueeze(1 ) * ff_output
lowerCamelCase_ = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = 4 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = "geglu" , UpperCamelCase__ = False , ) -> str:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = int(dim * mult )
lowerCamelCase_ = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCamelCase_ = GELU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if activation_fn == "gelu-approximate":
lowerCamelCase_ = GELU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , approximate='''tanh''' )
elif activation_fn == "geglu":
lowerCamelCase_ = GEGLU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif activation_fn == "geglu-approximate":
lowerCamelCase_ = ApproximateGELU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.ModuleList([] )
# project in
self.net.append(_SCREAMING_SNAKE_CASE )
# project dropout
self.net.append(nn.Dropout(_SCREAMING_SNAKE_CASE ) )
# project out
self.net.append(nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_SCREAMING_SNAKE_CASE ) )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
for module in self.net:
lowerCamelCase_ = module(_SCREAMING_SNAKE_CASE )
return hidden_states
class GELU(nn.Module):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = "none" ) -> str:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ = approximate
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(_SCREAMING_SNAKE_CASE , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.proj(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.gelu(_SCREAMING_SNAKE_CASE )
return hidden_states
class GEGLU(nn.Module):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , dim_out * 2 )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(_SCREAMING_SNAKE_CASE )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = self.proj(_SCREAMING_SNAKE_CASE ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_SCREAMING_SNAKE_CASE )
class ApproximateGELU(nn.Module):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.proj(_SCREAMING_SNAKE_CASE )
return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm(nn.Module):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.SiLU()
lowerCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , embedding_dim * 2 )
lowerCamelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.linear(self.silu(self.emb(_SCREAMING_SNAKE_CASE ) ) )
lowerCamelCase_ , lowerCamelCase_ = torch.chunk(_SCREAMING_SNAKE_CASE , 2 )
lowerCamelCase_ = self.norm(_SCREAMING_SNAKE_CASE ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = CombinedTimestepLabelEmbeddings(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.SiLU()
lowerCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , 6 * embedding_dim , bias=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE , eps=1e-6 )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.linear(self.silu(self.emb(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hidden_dtype=_SCREAMING_SNAKE_CASE ) ) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = emb.chunk(6 , dim=1 )
lowerCamelCase_ = self.norm(_SCREAMING_SNAKE_CASE ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = 1e-5 ) -> Dict:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = num_groups
lowerCamelCase_ = eps
if act_fn is None:
lowerCamelCase_ = None
else:
lowerCamelCase_ = get_activation(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , out_dim * 2 )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
if self.act:
lowerCamelCase_ = self.act(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.linear(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ = emb[:, :, None, None]
lowerCamelCase_ , lowerCamelCase_ = emb.chunk(2 , dim=1 )
lowerCamelCase_ = F.group_norm(_SCREAMING_SNAKE_CASE , self.num_groups , eps=self.eps )
lowerCamelCase_ = x * (1 + scale) + shift
return x
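# The Ada*Norm classes above all follow the same FiLM-style pattern: derive a
# shift and scale from an embedding, then modulate a parameter-free LayerNorm.
# A minimal standalone sketch of that idea (names here are illustrative):
class ScaleShiftNorm(nn.Module):
    def __init__(self, num_embeddings: int, dim: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, dim)
        self.linear = nn.Linear(dim, 2 * dim)
        self.norm = nn.LayerNorm(dim, elementwise_affine=False)

    def forward(self, x: torch.Tensor, timestep: torch.Tensor) -> torch.Tensor:
        scale, shift = self.linear(F.silu(self.emb(timestep))).chunk(2, dim=-1)
        return self.norm(x) * (1 + scale) + shift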
| 721
|
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
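# The class above implements histogram equalization by hand; OpenCV ships an
# equivalent one-liner for a quick comparison (the path is a placeholder):
img = cv2.imread("image_data/input.jpg", 0)
equalized = cv2.equalizeHist(img)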
| 66
| 0
|
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 570
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class __a ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] )-> List[Any]:
"""simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str )-> Dict:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> int:
"""simple docstring"""
UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
UpperCamelCase = BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ , repo_id="test-config" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
UpperCamelCase = BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> List[str]:
"""simple docstring"""
UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
UpperCamelCase = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase_ , repo_id="valid_org/test-config-org" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
UpperCamelCase = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : int )-> Union[str, Any]:
"""simple docstring"""
CustomConfig.register_for_auto_class()
UpperCamelCase = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
UpperCamelCase = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=UpperCAmelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Any )-> Any:
"""simple docstring"""
UpperCamelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
UpperCamelCase = c.n_embd + 1 # int
UpperCamelCase = c.resid_pdrop + 1.0 # float
UpperCamelCase = not c.scale_attn_weights # bool
UpperCamelCase = c.summary_type + "foo" # str
c.update_from_string(
f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
self.assertEqual(UpperCAmelCase_ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(UpperCAmelCase_ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(UpperCAmelCase_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(UpperCAmelCase_ , c.summary_type , "mismatch for key: summary_type" )
def _SCREAMING_SNAKE_CASE ( self : Any )-> List[str]:
"""simple docstring"""
UpperCamelCase = PretrainedConfig()
UpperCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
UpperCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase_ , UpperCAmelCase_ )]
if len(UpperCAmelCase_ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f" {', '.join(UpperCAmelCase_ )}." )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
with self.assertRaises(UpperCAmelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any )-> Any:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=UpperCAmelCase_ ) as mock_head:
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check ensures we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> int:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = AutoConfig.from_pretrained("bert-base-cased" )
UpperCamelCase = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase_ )
UpperCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
UpperCamelCase = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
UpperCamelCase = ["config.42.0.0.json"]
UpperCamelCase = 768
configuration.save_pretrained(UpperCAmelCase_ )
shutil.move(os.path.join(UpperCAmelCase_ , "config.4.0.0.json" ) , os.path.join(UpperCAmelCase_ , "config.42.0.0.json" ) )
UpperCamelCase = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 768 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Dict:
"""simple docstring"""
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
UpperCamelCase = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
UpperCamelCase = "v4.0.0"
UpperCamelCase , UpperCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase_ , return_unused_kwargs=UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase_ , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
UpperCamelCase = "v3.0.0"
UpperCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(old_configuration.hidden_size , 768 )
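# Hedged sketch (not the transformers implementation): the update_from_string
# test above feeds a "key1=value1,key2=value2" override string whose values
# must be cast to the type of each existing attribute. A minimal standalone
# version of that parsing idea, with hypothetical names, looks like this:
def update_object_from_string(obj, update_str):
    for pair in update_str.split(","):
        key, value = pair.split("=")
        old = getattr(obj, key)  # unknown keys raise AttributeError
        if isinstance(old, bool):  # check bool before int: bool subclasses int
            setattr(obj, key, value.lower() in ("true", "1"))
        elif isinstance(old, int):
            setattr(obj, key, int(value))
        elif isinstance(old, float):
            setattr(obj, key, float(value))
        else:
            setattr(obj, key, value)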
| 554
| 0
|
"""simple docstring"""
import itertools
import math
def is_prime(number):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth=1_0001):
    '''simple docstring'''
    return next(itertools.islice(prime_generator(), nth - 1, nth))
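# Sanity checks (a small added sketch): the primes below 30 agree with the
# 6k +/- 1 test, and the 6th prime is 13, so solution(6) == 13.
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert solution(6) == 13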
if __name__ == "__main__":
print(F"{solution() = }")
| 707
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int]=1_3 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : List[Any]=9_9 , UpperCAmelCase__ : Any=3_2 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Any=3_7 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Union[str, Any]=5_1_2 , UpperCAmelCase__ : str=1_6 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : str=None , ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] ) -> str:
__SCREAMING_SNAKE_CASE = LlamaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , ) -> Dict:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = LlamaModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = LlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , ) -> List[str]:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = LlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
__SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0]
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def UpperCAmelCase_ ( self : Any ) -> str:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
snake_case__ : Optional[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
snake_case__ : int = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any = False
snake_case__ : int = False
def UpperCAmelCase_ ( self : str ) -> Tuple:
__SCREAMING_SNAKE_CASE = LlamaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def UpperCAmelCase_ ( self : Any ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = "single_label_classification"
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self : str ) -> int:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = "multi_label_classification"
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__SCREAMING_SNAKE_CASE = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def UpperCAmelCase_ ( self : str ) -> List[str]:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : List[Any] ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE = LlamaModel(UpperCAmelCase__ )
original_model.to(UpperCAmelCase__ )
original_model.eval()
__SCREAMING_SNAKE_CASE = original_model(UpperCAmelCase__ ).last_hidden_state
__SCREAMING_SNAKE_CASE = original_model(UpperCAmelCase__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE = {"type": scaling_type, "factor": 10.0}
__SCREAMING_SNAKE_CASE = LlamaModel(UpperCAmelCase__ )
scaled_model.to(UpperCAmelCase__ )
scaled_model.eval()
__SCREAMING_SNAKE_CASE = scaled_model(UpperCAmelCase__ ).last_hidden_state
__SCREAMING_SNAKE_CASE = scaled_model(UpperCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__SCREAMING_SNAKE_CASE = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__SCREAMING_SNAKE_CASE = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__SCREAMING_SNAKE_CASE = model(torch.tensor(UpperCAmelCase__ ) )
# Expected mean on dim = -1
__SCREAMING_SNAKE_CASE = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__SCREAMING_SNAKE_CASE = model(torch.tensor(UpperCAmelCase__ ) )
# Expected mean on dim = -1
__SCREAMING_SNAKE_CASE = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def UpperCAmelCase_ ( self : int ) -> str:
__SCREAMING_SNAKE_CASE = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__SCREAMING_SNAKE_CASE = model(torch.tensor(UpperCAmelCase__ ) )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
__SCREAMING_SNAKE_CASE = "Simply put, the theory of relativity states that "
__SCREAMING_SNAKE_CASE = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__SCREAMING_SNAKE_CASE = tokenizer.encode(UpperCAmelCase__ , return_tensors="pt" )
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=UpperCAmelCase__ )
# greedy generation outputs
__SCREAMING_SNAKE_CASE = model.generate(UpperCAmelCase__ , max_new_tokens=6_4 , top_p=UpperCAmelCase__ , temperature=1 , do_sample=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
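# Hedged sketch: the scaling test above builds a dict of the form
# {"type": <"linear"|"dynamic">, "factor": 10.0}; in released transformers
# versions this dict is supplied via the `rope_scaling` field of LlamaConfig.
# The tiny sizes below are illustrative assumptions, not the tester's values.
def _tiny_scaled_llama_config(scaling_type="linear"):
    return LlamaConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        max_position_embeddings=64,
        rope_scaling={"type": scaling_type, "factor": 2.0},
    )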
| 553
| 0
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Dict = "cpu"
SCREAMING_SNAKE_CASE__ : Any = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
SCREAMING_SNAKE_CASE__ : Optional[int] = "path-to-your-trained-model"
SCREAMING_SNAKE_CASE__ : int = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
SCREAMING_SNAKE_CASE__ : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE__ : Optional[int] = pipe.to(device)
# to channels last
SCREAMING_SNAKE_CASE__ : Dict = pipe.unet.to(memory_format=torch.channels_last)
SCREAMING_SNAKE_CASE__ : int = pipe.vae.to(memory_format=torch.channels_last)
SCREAMING_SNAKE_CASE__ : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
SCREAMING_SNAKE_CASE__ : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
SCREAMING_SNAKE_CASE__ : int = torch.randn(2, 4, 64, 64)
SCREAMING_SNAKE_CASE__ : List[str] = torch.rand(1) * 9_99
SCREAMING_SNAKE_CASE__ : Tuple = torch.randn(2, 77, 7_68)
SCREAMING_SNAKE_CASE__ : int = (sample, timestep, encoder_hidden_status)
try:
SCREAMING_SNAKE_CASE__ : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
SCREAMING_SNAKE_CASE__ : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
SCREAMING_SNAKE_CASE__ : List[str] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
SCREAMING_SNAKE_CASE__ : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
SCREAMING_SNAKE_CASE__ : Optional[Any] = 6_66
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device).manual_seed(seed)
SCREAMING_SNAKE_CASE__ : Dict = {"generator": generator}
if args.steps is not None:
SCREAMING_SNAKE_CASE__ : List[str] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
SCREAMING_SNAKE_CASE__ : Tuple = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
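# Optional helper (a sketch added for illustration; it reuses the module-level
# `pipe`, `prompt`, and `generate_kwargs` names from the call above): time one
# bf16-autocast generation so the effect of ipex.optimize can be measured.
import time

def time_one_generation() -> float:
    start = time.perf_counter()
    with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
        pipe(prompt, **generate_kwargs)
    return time.perf_counter() - start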
| 205
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model) -> None:
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
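# Example invocation (hypothetical script name and paths, for illustration only):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/funnel/model.ckpt \
#       --config_file /tmp/funnel/config.json \
#       --pytorch_dump_path /tmp/funnel/pytorch_model.bin \
#       --base_model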
| 205
| 1
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
A : Optional[int] = logging.get_logger(__name__)
A : Optional[Any] = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict , __magic_name__ : Optional[Any]=None , **__magic_name__ : Optional[int] ) -> Optional[int]:
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = kwargs.get("model_save_dir" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = kwargs.get("latest_model_name" , __magic_name__ )
def __call__( self : str , **__magic_name__ : str ) -> Tuple:
inputs = {k: np.array(v) for k, v in kwargs.items()}
return self.model.run(None, inputs)
@staticmethod
def __A ( __magic_name__ : Union[str, Path] , __magic_name__ : Optional[int]=None , __magic_name__ : List[str]=None ) -> Optional[Any]:
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
SCREAMING_SNAKE_CASE_ = "CPUExecutionProvider"
return ort.InferenceSession(__magic_name__ , providers=[provider] , sess_options=__magic_name__ )
def __A ( self : Dict , __magic_name__ : Union[str, Path] , __magic_name__ : Optional[str] = None , **__magic_name__ : int ) -> Dict:
SCREAMING_SNAKE_CASE_ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
SCREAMING_SNAKE_CASE_ = self.model_save_dir.joinpath(self.latest_model_name )
SCREAMING_SNAKE_CASE_ = Path(__magic_name__ ).joinpath(__magic_name__ )
try:
shutil.copyfile(__magic_name__ , __magic_name__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
SCREAMING_SNAKE_CASE_ = self.model_save_dir.joinpath(__magic_name__ )
if src_path.exists():
SCREAMING_SNAKE_CASE_ = Path(__magic_name__ ).joinpath(__magic_name__ )
try:
shutil.copyfile(__magic_name__ , __magic_name__ )
except shutil.SameFileError:
pass
def __A ( self : str , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : int , ) -> Any:
if os.path.isfile(__magic_name__ ):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
# saving model weights/files
self._save_pretrained(__magic_name__ , **__magic_name__ )
@classmethod
def __A ( cls : Dict , __magic_name__ : Union[str, Path] , __magic_name__ : Optional[Union[bool, str, None]] = None , __magic_name__ : Optional[Union[str, None]] = None , __magic_name__ : bool = False , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[str] = None , __magic_name__ : Optional["ort.SessionOptions"] = None , **__magic_name__ : Any , ) -> Dict:
SCREAMING_SNAKE_CASE_ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = OnnxRuntimeModel.load_model(
os.path.join(__magic_name__ , __magic_name__ ) , provider=__magic_name__ , sess_options=__magic_name__ )
SCREAMING_SNAKE_CASE_ = Path(__magic_name__ )
# load model from hub
else:
# download model
SCREAMING_SNAKE_CASE_ = hf_hub_download(
repo_id=__magic_name__ , filename=__magic_name__ , use_auth_token=__magic_name__ , revision=__magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , )
SCREAMING_SNAKE_CASE_ = Path(__magic_name__ ).parent
SCREAMING_SNAKE_CASE_ = Path(__magic_name__ ).name
SCREAMING_SNAKE_CASE_ = OnnxRuntimeModel.load_model(__magic_name__ , provider=__magic_name__ , sess_options=__magic_name__ )
return cls(model=__magic_name__ , **__magic_name__ )
@classmethod
def __A ( cls : Dict , __magic_name__ : Union[str, Path] , __magic_name__ : bool = True , __magic_name__ : Optional[str] = None , __magic_name__ : Optional[str] = None , **__magic_name__ : List[str] , ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = None
if len(str(__magic_name__ ).split("@" ) ) == 2:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_id.split("@" )
return cls._from_pretrained(
model_id=__magic_name__ , revision=__magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , use_auth_token=__magic_name__ , **__magic_name__ , )
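# Hedged usage sketch (hypothetical path and zero-filled input, shown only for
# illustration): run a saved session directly, picking the numpy dtype for the
# first input from the ONNX->numpy map bound to `A` at the top of this file.
def _run_session_example(model_path="model.onnx"):
    sess = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    first_input = sess.get_inputs()[0]
    # dynamic dims come back as strings/None; substitute 1 for the dummy batch
    shape = [dim if isinstance(dim, int) else 1 for dim in first_input.shape]
    dummy = np.zeros(shape, dtype=A[first_input.type])
    # run(None, ...) returns every model output
    return sess.run(None, {first_input.name: dummy})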
| 719
|
from math import factorial
def solution(num: int = 1_0_0) -> int:
    return sum(int(x) for x in str(factorial(num)))
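# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so the
# digit sum of factorial(10) is 27.
assert solution(10) == 27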
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 356
| 0
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    '''simple docstring'''
    path = os.path.join(output_dir, F'{split}_results.json')
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(F'can\'t find {path}')
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase__ ( lowercase__):
'''simple docstring'''
def _lowerCamelCase ( self :int ) -> List[str]:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(SCREAMING_SNAKE_CASE_ , "argv" , SCREAMING_SNAKE_CASE_ ):
run_flax_glue.main()
__UpperCamelCase : Tuple = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def _lowerCamelCase ( self :int ) -> int:
__UpperCamelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(SCREAMING_SNAKE_CASE_ , "argv" , SCREAMING_SNAKE_CASE_ ):
run_clm_flax.main()
__UpperCamelCase : Tuple = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def _lowerCamelCase ( self :Optional[int] ) -> Tuple:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : int = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(SCREAMING_SNAKE_CASE_ , "argv" , SCREAMING_SNAKE_CASE_ ):
run_summarization_flax.main()
__UpperCamelCase : Dict = get_results(SCREAMING_SNAKE_CASE_ , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def _lowerCamelCase ( self :List[str] ) -> Dict:
__UpperCamelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(SCREAMING_SNAKE_CASE_ , "argv" , SCREAMING_SNAKE_CASE_ ):
run_mlm_flax.main()
__UpperCamelCase : str = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def _lowerCamelCase ( self :Union[str, Any] ) -> int:
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : int = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(SCREAMING_SNAKE_CASE_ , "argv" , SCREAMING_SNAKE_CASE_ ):
run_ta_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def _lowerCamelCase ( self :Union[str, Any] ) -> Optional[int]:
__UpperCamelCase : Dict = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(SCREAMING_SNAKE_CASE_ , "argv" , SCREAMING_SNAKE_CASE_ ):
run_flax_ner.main()
__UpperCamelCase : Tuple = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def _lowerCamelCase ( self :str ) -> List[str]:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(SCREAMING_SNAKE_CASE_ , "argv" , SCREAMING_SNAKE_CASE_ ):
run_qa.main()
__UpperCamelCase : str = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 557
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowercase ( unittest.TestCase ):
def UpperCAmelCase (self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = '''hf-internal-testing/tiny-random-t5'''
lowerCAmelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = tokenizer('''This is me''' ,return_tensors='''pt''' )
lowerCAmelCase = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowerCAmelCase = model.generate(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowerCAmelCase = model_reloaded.generate(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase (self : str ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = '''hf-internal-testing/tiny-random-t5'''
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = model.reverse_bettertransformer()
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
| 535
| 0
|
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
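# Worked example (HackerRank-style abbreviation): "daBcd" becomes "ABC" by
# capitalizing 'a' and 'c' and deleting the remaining lowercase letters, while
# "dBcd" cannot, because its uppercase 'B' would have to match 'A'.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False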
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("""Input value must be an 'int' type""")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
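# Worked example: 32 = 0b100000, so six right-shifts reach zero and the
# function returns 6 -- the same value as Python's int.bit_length().
assert get_highest_set_bit_position(32) == (32).bit_length() == 6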
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64
| 0
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
snake_case : Optional[Any] = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches=10, n_valid_batches=2):
    """simple docstring"""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """simple docstring"""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """simple docstring"""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self )-> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE = DummyModel()
_SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = dummy_dataloaders()
_SCREAMING_SNAKE_CASE = ProjectConfiguration(total_limit=1 , project_dir=A_ , automatic_checkpoint_naming=A_ )
# Train baseline
_SCREAMING_SNAKE_CASE = Accelerator(project_config=A_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
A_ , A_ , A_ , A_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __magic_name__ ( self )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE = DummyModel()
_SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = dummy_dataloaders()
# Train baseline
_SCREAMING_SNAKE_CASE = Accelerator()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
A_ , A_ , A_ , A_ )
# Save initial
_SCREAMING_SNAKE_CASE = os.path.join(A_ , 'initial' )
accelerator.save_state(A_ )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE = optimizer.state_dict()
_SCREAMING_SNAKE_CASE = train(3 , A_ , A_ , A_ , A_ )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
_SCREAMING_SNAKE_CASE = DummyModel()
_SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = dummy_dataloaders()
_SCREAMING_SNAKE_CASE = Accelerator()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
A_ , A_ , A_ , A_ )
accelerator.load_state(A_ )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
_SCREAMING_SNAKE_CASE = train(2 , A_ , A_ , A_ , A_ )
# Save everything
_SCREAMING_SNAKE_CASE = os.path.join(A_ , 'checkpoint' )
accelerator.save_state(A_ )
# Load everything back in and make sure all states work
accelerator.load_state(A_ )
test_rands += train(1 , A_ , A_ , A_ , A_ )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
def __magic_name__ ( self )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE = DummyModel()
_SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = dummy_dataloaders()
_SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=A_ )
# Train baseline
_SCREAMING_SNAKE_CASE = Accelerator(project_dir=A_ , project_config=A_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
A_ , A_ , A_ , A_ )
# Save initial
accelerator.save_state()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE = optimizer.state_dict()
_SCREAMING_SNAKE_CASE = train(3 , A_ , A_ , A_ , A_ )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
_SCREAMING_SNAKE_CASE = DummyModel()
_SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = dummy_dataloaders()
_SCREAMING_SNAKE_CASE = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A_ )
_SCREAMING_SNAKE_CASE = Accelerator(project_dir=A_ , project_config=A_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
A_ , A_ , A_ , A_ )
accelerator.load_state(os.path.join(A_ , 'checkpoints' , 'checkpoint_0' ) )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
_SCREAMING_SNAKE_CASE = train(2 , A_ , A_ , A_ , A_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A_ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , A_ , A_ , A_ , A_ )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
def __magic_name__ ( self )-> List[str]:
_SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3] )
_SCREAMING_SNAKE_CASE = torch.tensor([2, 3, 4] )
_SCREAMING_SNAKE_CASE = DummyModel()
_SCREAMING_SNAKE_CASE = torch.optim.Adam(net.parameters() )
_SCREAMING_SNAKE_CASE = Accelerator()
with self.assertRaises(A_ ) as ve:
accelerator.register_for_checkpointing(A_ , A_ , A_ , A_ )
_SCREAMING_SNAKE_CASE = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __magic_name__ ( self )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE = DummyModel()
_SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(A_ , step_size=1 , gamma=0.99 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = dummy_dataloaders()
_SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=A_ )
# Train baseline
_SCREAMING_SNAKE_CASE = Accelerator(project_dir=A_ , project_config=A_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
A_ , A_ , A_ , A_ , A_ )
# Save initial
accelerator.save_state()
_SCREAMING_SNAKE_CASE = scheduler.state_dict()
train(3 , A_ , A_ , A_ , A_ , A_ )
self.assertNotEqual(A_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A_ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(A_ , scheduler.state_dict() )
def __magic_name__ ( self )-> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE = DummyModel()
_SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=A_ , total_limit=2 )
# Train baseline
_SCREAMING_SNAKE_CASE = Accelerator(project_dir=A_ , project_config=A_ )
_SCREAMING_SNAKE_CASE = accelerator.prepare(A_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A_ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(A_ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(A_ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __magic_name__ ( self )-> Tuple:
_SCREAMING_SNAKE_CASE = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
snake_case : str = '/tmp/accelerate/state_checkpointing'
snake_case : Any = DummyModel()
snake_case : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
snake_case : Optional[int] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
snake_case , snake_case : str = dummy_dataloaders()
snake_case : str = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
snake_case : Optional[int] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
snake_case , snake_case , snake_case , snake_case , snake_case : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
snake_case , snake_case : Union[str, Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
snake_case : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
snake_case : List[str] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
snake_case : Dict = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
snake_case : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 605
|
from __future__ import annotations
def pigeon_sort(array):
    """simple docstring"""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
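# Worked example for pigeonhole sort: each value i lands in hole i - min(array),
# so [8, 3, 2, 7, 4] is rebuilt in order as [2, 3, 4, 7, 8].
assert pigeon_sort([8, 3, 2, 7, 4]) == [2, 3, 4, 7, 8]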
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input('Enter numbers separated by comma:\n')
unsorted = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 605
| 1
|
"""simple docstring"""
import os
def solution( filename : str = "input.txt" ) -> int:
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(',' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
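# Worked example (a sketch on a hypothetical 2x2 input, not the Project Euler
# data file): for matrix = [[1, 3], [2, 1]] the first column seeds the sums to
# [1, 2]; the left-to-right pass gives column 1 = [4, 3]; the top-down
# relaxation keeps [4, 3] (since 3 < 4 + 1) and the bottom-up relaxation keeps
# [4, 3] (since 4 < 3 + 3), so the answer is min(4, 3) = 3 via the path 2 -> 1.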
if __name__ == "__main__":
print(f'''{solution() = }''')
| 192
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self : Tuple , short_edge_length : List[Any] , max_size : int=sys.maxsize) -> int:
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self : Optional[Any] , imgs : Any) -> Dict:
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w)
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2 , 0 , 1).unsqueeze(0)  # (h, w, c) -> (n, c, h, w)
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
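# Worked example for the resize above (hypothetical numbers): with the short
# edge sampled as 800, max_size = 1333 and a 480 x 640 input, scale is
# 800 / 480, giving (newh, neww) = (800, 1066.67); max(800, 1066.67) is below
# 1333, so no extra shrink is applied and the int(x + 0.5) rounding yields a
# final 800 x 1067 image.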
class Preprocess:
    def __init__(self : List[str] , cfg : List[str]) -> Optional[int]:
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self : Tuple , images : str) -> int:
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images)
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
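    # Padding example (hypothetical shapes): two images of shape (3, 480, 640)
    # and (3, 512, 600) share max_size (3, 512, 640); the first gets 32 rows of
    # pad_value at the bottom, the second 40 columns on the right, and the
    # original (h, w) pairs are returned alongside the stacked batch.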
    def __call__(self : Optional[Any] , images : Dict , single_image : Union[str, Any]=False) -> Optional[int]:
        with torch.no_grad():
            if not isinstance(images , list):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images )):
                if isinstance(images[i] , torch.Tensor):
                    images.insert(i , images.pop(i ).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes : Any , scale_yx : Any ) -> int:
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box( tensor : Tuple , box_size : Tuple[int, int] ) -> Union[str, Any]:
    '''simple docstring'''
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
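# Usage sketch for the two helpers above (hypothetical values): scaling the box
# tensor [[10., 20., 30., 40.]] by scale_yx = [[2., 3.]] multiplies the x
# coordinates (columns 0 and 2) by 3 and the y coordinates (columns 1 and 3)
# by 2, giving [[30., 40., 90., 80.]]; clipping that result with
# box_size = (50, 60) then clamps x into [0, 60] and y into [0, 50],
# i.e. [[30., 40., 60., 50.]].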
| 192
| 1
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def _lowerCamelCase ( self , _UpperCAmelCase ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
return input_text, output_text
def _lowerCamelCase ( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
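        # Note: "\u0120" (printed as a capital G with breve) is the byte-level
        # BPE marker for a leading space, so "\u0120newer" is the single token
        # for " newer" including its preceding space.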
def _lowerCamelCase ( self ):
__a : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowerCamelCase__ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowerCamelCase__ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__a : Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase__ )
__a : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase__ )
__a : Optional[int] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
__a : str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
__a : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
__a : str = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowerCamelCase ( self ):
__a : Dict = self.get_tokenizer()
__a : Any = '''Encode this sequence.'''
__a : Any = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__a : List[Any] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
__a : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
__a : List[Any] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
__a : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__a : int = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
__a : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
# Testing spaces after special tokens
__a : Tuple = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ )} ) # mask token has a left space
__a : List[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
__a : Dict = '''Encode <mask> sequence'''
__a : Union[str, Any] = '''Encode <mask>sequence'''
__a : Union[str, Any] = tokenizer.encode(lowerCamelCase__ )
__a : Optional[Any] = encoded.index(lowerCamelCase__ )
__a : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
__a : List[Any] = tokenizer.encode(lowerCamelCase__ )
__a : str = encoded.index(lowerCamelCase__ )
__a : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__a : int = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__a : Optional[Any] = '''A, <mask> AllenNLP sentence.'''
__a : Optional[int] = tokenizer_r.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
__a : List[Any] = tokenizer_p.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__a : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__a : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _lowerCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__a : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
__a : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__a : int = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowerCamelCase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowerCamelCase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowerCamelCase__ )
def _lowerCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : List[Any] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__a : List[str] = f"""{text_of_1_token} {text_of_1_token}"""
__a : List[str] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
__a : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
__a : str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
__a : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
__a : Any = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
__a : Optional[int] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
__a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
__a : int = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
__a : List[str] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__a : str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
__a : Optional[int] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
__a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
__a : str = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
__a : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
__a : List[str] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
| 52
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
if attention_mask is None:
A__ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
A__ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
A__ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=3_2 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.0_2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 2_0
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
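        # The check above is the standard cache-equivalence test: decoding the
        # whole sequence at once and decoding the last token against a primed
        # cache must produce (near-)identical logits for the final position.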
    def check_use_cache_forward_with_attn_mask(self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 2_0
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class _UpperCamelCase ( unittest.TestCase):
    vocab_size = 9_9
    def _get_config_and_data(self ):
"""simple docstring"""
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def A (self ):
"""simple docstring"""
        config , input_ids , batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , expected_shape )
def A (self ):
"""simple docstring"""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        context = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.int64 )
        summary = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , expected_shape )
def A (self ):
"""simple docstring"""
        input_ids = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self ):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotModelTester(self )
def A (self ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
def A (self ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def A (self ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def A (self ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
def A (self ):
"""simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
def A (self ):
"""simple docstring"""
        generate_kwargs = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 1_5, """max_length""": 2_5}
        tokenizer_kwargs = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=True )
        tokenizer = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
        src_text = ["""Sam"""]
        model_inputs = tokenizer(src_text , return_tensors="""jax""" )
        generated_ids = model.generate(**model_inputs , **generate_kwargs )
        tgt_text = """Sam is a great name. It means \"sun\" in Gaelic."""
        generated_txt = tokenizer.batch_decode(generated_ids , **tokenizer_kwargs )
        assert generated_txt[0].strip() == tgt_text
| 574
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES )} , )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    def __post_init__( self :List[Any] ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    dataset_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
    validation_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    validation_split_percentage: Optional[int] = field(
        default=5 , metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        } , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated. Default to the max input length of the model.'''
            )
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    mlm_probability: float = field(
        default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        } , )
    def __post_init__( self :Union[str, Any] ):
if self.train_file is not None:
            extension = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
            extension = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references( dataset , ref_file ):
    """simple docstring"""
    with open(ref_file , 'r' , encoding='utf-8' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict )
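# Usage sketch (hypothetical data): if the dataset has a "text" column and the
# ref file holds one JSON list of sub-token indices per line, e.g. "[1, 3]",
# the returned Dataset gains a "chinese_ref" column aligned row by row, which
# DataCollatorForWholeWordMask below consumes for whole-word masking.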
def main( ):
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , )
            datasets['train'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , )
else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['text'] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model() # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , 'train_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_train_file , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
return results
def __snake_case ( _UpperCAmelCase ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 314
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 314
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests( ModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    model_class = PriorTransformer
    main_input_name = 'hidden_states'
@property
    def dummy_input(self ):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self , seed=0 ):
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
    def input_shape(self ):
return (4, 8)
@property
    def output_shape(self ):
return (4, 8)
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 4,
            '''num_layers''': 2,
            '''embedding_dim''': 8,
            '''num_embeddings''': 7,
            '''additional_embeddings''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def a_ ( self ):
        model , loading_info = PriorTransformer.from_pretrained(
            '''hf-internal-testing/prior-dummy''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def a_ ( self ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['''hidden_states''', '''timestep''']
        self.assertListEqual(arg_names[:2] , expected_arg_names )
def a_ ( self ):
        model = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
        model = model.to(torch_device )
        if hasattr(model , '''set_default_attn_processor''' ):
            model.set_default_attn_processor()
        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def get_dummy_seed_input(self , batch_size=1 , embedding_dim=7_6_8 , num_embeddings=7_7 , seed=0 ):
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
def a_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[3_7, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
    def a_ ( self , seed , expected_slice ):
        model = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
        model.to(torch_device )
        input_dict = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**input_dict )[0]
        assert list(sample.shape ) == [1, 7_6_8]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
| 550
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 550
| 1
|
from math import pi, sqrt
def gamma( num ) -> float:
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
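# Example values (sanity check): gamma(3) == 2.0 (i.e. 2!), and
# gamma(4.5) == 3.5 * 2.5 * 1.5 * 0.5 * sqrt(pi) ~= 11.6317, unwinding the
# half-integer recursion down to the gamma(0.5) == sqrt(pi) base case.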
def test_gamma( ) -> None:
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F"""gamma({num}) = {gamma(num)}""")
print("\nEnter 0 to exit...")
| 634
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634
| 1
|
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest( nn.Module ):
"""simple docstring"""
    def __init__( self) -> int:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4 , 5)
    def forward( self , lowerCamelCase_) -> Tuple:
        return self.linear2(self.batchnorm(self.linear1(lowerCamelCase_)))
class OffloadTester( unittest.TestCase ):
"""simple docstring"""
    def test_offload_state_dict( self) -> str:
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict())
            index_file = os.path.join(tmp_dir , '''index.json''')
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , F'{key}.dat')
                self.assertTrue(os.path.isfile(weight_file))
# TODO: add tests on the fact weights are properly loaded
    def test_offload_weight( self) -> Tuple:
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , '''weight''' , tmp_dir , {})
                weight_file = os.path.join(tmp_dir , '''weight.dat''')
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(dtype).split('''.''')[1]}})
                new_weight = load_offloaded_weight(weight_file , index['''weight'''])
                self.assertTrue(torch.equal(weight , new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})
        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
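

# Minimal usage sketch (added for illustration; uses only the accelerate
# utilities imported above): offload a single tensor to disk and read it back
# through the index entry that offload_weight returns.
if __name__ == "__main__":
    example = torch.randn(2, 3)
    with TemporaryDirectory() as tmp_dir:
        index = offload_weight(example, "weight", tmp_dir, {})
        reloaded = load_offloaded_weight(os.path.join(tmp_dir, "weight.dat"), index["weight"])
        assert torch.equal(example, reloaded)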
| 34
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
return TaConfig(
vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config(self):
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict, ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_headmasking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __lowerCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a )
__SCREAMING_SNAKE_CASE = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a, _a )
__SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) )
__SCREAMING_SNAKE_CASE = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a )
self.assertEqual(_a, _a )
| 693
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model
@property
    def dummy_vae(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
@property
    def dummy_text_encoder(self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
        '''simple docstring'''
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE : Tuple = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_vae
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Optional[int] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
SCREAMING_SNAKE_CASE : Dict = 77
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_image.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE : Any = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE : str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = alt_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = alt_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = self.dummy_vae
SCREAMING_SNAKE_CASE : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
SCREAMING_SNAKE_CASE : Tuple = 77
SCREAMING_SNAKE_CASE : Dict = self.dummy_image.to(UpperCamelCase__ )
# put models in fp16
SCREAMING_SNAKE_CASE : int = unet.half()
SCREAMING_SNAKE_CASE : Optional[int] = vae.half()
SCREAMING_SNAKE_CASE : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE : Tuple = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE : Any = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = alt_pipe(
[prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE : Union[str, Any] = init_image.resize((760, 504) )
SCREAMING_SNAKE_CASE : Any = '''BAAI/AltDiffusion'''
SCREAMING_SNAKE_CASE : int = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : Optional[int] = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
SCREAMING_SNAKE_CASE : Optional[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
SCREAMING_SNAKE_CASE : int = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
SCREAMING_SNAKE_CASE : Any = '''BAAI/AltDiffusion'''
SCREAMING_SNAKE_CASE : Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : int = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Dict = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(self, vocab_size=12_8100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)])
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
@property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None, ) -> Mapping[str, Any]:
        '''simple docstring'''
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
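

# Usage sketch (added for illustration, not part of the original module):
# instantiate a small config and inspect which ONNX inputs it exposes; with
# type_vocab_size == 0 the token_type_ids entry is omitted.
if __name__ == "__main__":
    config = DebertaV2Config(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
    onnx_config = DebertaV2OnnxConfig(config)
    print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask']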
| 34
| 1
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_00)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
lowerCAmelCase_ = """will be""" if year > datetime.now().year else """was"""
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
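    # Illustrative check (added): Western Easter 2000 fell on April 23, which
    # the computus above reproduces.
    assert gauss_easter(2000) == datetime(2000, 4, 23)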
| 217
|
_SCREAMING_SNAKE_CASE : str = 8.3_144_598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
'''simple docstring'''
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
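    # Worked check (added for illustration): the gas constant above is in
    # J/(mol*K), so the molar mass must be given in kg/mol for a physical
    # answer; for N2 (0.028 kg/mol) at 300 K the formula gives roughly 517 m/s.
    vrms_si = rms_speed_of_molecule(300, 0.028)
    print(F'With molar mass in kg/mol, Vrms is {vrms_si:.1f} m/s')  # ~517.0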
| 344
| 0
|
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
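    # Illustrative examples (added): the result matches the built-in bitwise
    # AND operator up to zero padding.
    assert binary_and(25, 32) == "0b000000"
    assert int(binary_and(25, 37), 2) == 25 & 37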
| 703
|
"""simple docstring"""
from collections import deque
class Process:
    """simple docstring"""

    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        """simple docstring"""
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """simple docstring"""

    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int, ) -> None:
        """simple docstring"""
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """simple docstring"""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """simple docstring"""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """simple docstring"""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        """simple docstring"""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        """simple docstring"""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
    print(
        F"""waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F"""completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F"""turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        F"""sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"""
    )
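    # Illustrative check (added): with bursts 53, 17, 68 and 24 and time
    # slices 17 and 25 the schedule is deterministic -- P2 completes inside
    # the first round-robin level, P4 inside the second, and the FCFS tail
    # then runs P1 before P3.
    assert mlfq.calculate_sequence_of_finish_queue() == ["P2", "P4", "P1", "P3"]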
| 482
| 0
|
from collections import deque
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :Any , a :str , a :int , a :int ) -> None:
__UpperCamelCase : Optional[int] = process_name # process name
__UpperCamelCase : int = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCamelCase : Optional[Any] = arrival_time
__UpperCamelCase : Dict = burst_time # remaining burst time
__UpperCamelCase : str = 0 # total time of the process wait in ready queue
__UpperCamelCase : int = 0 # time from arrival time to completion time
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :Dict , a :int , a :list[int] , a :deque[Process] , a :int , ) -> None:
# total number of mlfq's queues
__UpperCamelCase : Optional[Any] = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCamelCase : List[str] = time_slices
# unfinished process is in this ready_queue
__UpperCamelCase : int = queue
# current time
__UpperCamelCase : List[Any] = current_time
# finished process is in this sequence queue
__UpperCamelCase : deque[Process] = deque()
def _lowerCamelCase ( self :List[Any] ) -> list[str]:
__UpperCamelCase : Dict = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _lowerCamelCase ( self :Dict , a :list[Process] ) -> list[int]:
__UpperCamelCase : Optional[Any] = []
for i in range(len(a ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _lowerCamelCase ( self :List[Any] , a :list[Process] ) -> list[int]:
__UpperCamelCase : List[str] = []
for i in range(len(a ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _lowerCamelCase ( self :Optional[Any] , a :list[Process] ) -> list[int]:
__UpperCamelCase : List[Any] = []
for i in range(len(a ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _lowerCamelCase ( self :List[str] , a :deque[Process] ) -> list[int]:
return [q.burst_time for q in queue]
def _lowerCamelCase ( self :int , a :Process ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _lowerCamelCase ( self :List[str] , a :deque[Process] ) -> deque[Process]:
__UpperCamelCase : deque[Process] = deque() # sequence deque of finished process
while len(a ) != 0:
__UpperCamelCase : int = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(a )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCamelCase : Union[str, Any] = 0
# set the process's turnaround time because it is finished
__UpperCamelCase : int = self.current_time - cp.arrival_time
# set the completion time
__UpperCamelCase : Dict = self.current_time
# add the process to queue that has finished queue
finished.append(a )
self.finish_queue.extend(a ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _lowerCamelCase ( self :Tuple , a :deque[Process] , a :int ) -> tuple[deque[Process], deque[Process]]:
__UpperCamelCase : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(a ) ):
__UpperCamelCase : Optional[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(a )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCamelCase : List[Any] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(a )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCamelCase : int = 0
# set the finish time
__UpperCamelCase : Tuple = self.current_time
# update the process' turnaround time because it is finished
__UpperCamelCase : Union[str, Any] = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(a )
self.finish_queue.extend(a ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _lowerCamelCase ( self :int ) -> deque[Process]:
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowercase : Dict = Process('P1', 0, 53)
lowercase : Tuple = Process('P2', 0, 17)
lowercase : Dict = Process('P3', 0, 68)
lowercase : Tuple = Process('P4', 0, 24)
lowercase : List[Any] = 3
lowercase : Any = [17, 25]
lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
lowercase : int = Process('P1', 0, 53)
lowercase : Dict = Process('P2', 0, 17)
lowercase : List[Any] = Process('P3', 0, 68)
lowercase : Union[str, Any] = Process('P4', 0, 24)
lowercase : Tuple = 3
lowercase : Union[str, Any] = [17, 25]
lowercase : List[Any] = deque([Pa, Pa, Pa, Pa])
lowercase : List[str] = MLFQ(number_of_queues, time_slices, queue, 0)
lowercase : Union[str, Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| 557
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase : List[str] = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
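

# Usage sketch (added for illustration): the shim above only emits a
# FutureWarning before deferring to PerceiverImageProcessor, so callers that
# migrate gradually can silence the transition explicitly.
if __name__ == "__main__":
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FutureWarning)
        extractor = PerceiverFeatureExtractor()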
| 557
| 1
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
"""simple docstring"""
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def is_chinese(word: str):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """simple docstring"""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
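

if __name__ == "__main__":
    # Toy check (added for illustration; runs before the CLI below and needs
    # no model downloads): add_sub_symbol marks the non-initial characters of
    # a segmented whole word with the "##" prefix whole-word masking expects.
    assert add_sub_symbol(["欢", "迎", "你"], {"欢迎"}) == ["欢", "##迎", "你"]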
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
| 531
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
'''simple docstring'''
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ], )
@require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10})
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ], )
@require_tf
    def test_small_model_tf(self):
        pass
| 531
| 1
|
'''simple docstring'''
def is_contains_unique_chars(input_str: str) -> bool:
    '''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
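    # Illustrative examples (added): each code point maps to one bit position,
    # so any repeated character hits the same bit twice and the check fails.
    assert is_contains_unique_chars("abcde") is True
    assert is_contains_unique_chars("abcdea") is False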
| 72
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowercase : Dict = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ ,size=size["""shortest_edge"""] ,default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_ ,size=(size["""height"""], size["""width"""]) ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
return flip_channel_order(SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case : List[str] = resample if resample is not None else self.resample
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
snake_case : Tuple = size if size is not None else self.size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else self.crop_size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
snake_case : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
snake_case : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
snake_case : Optional[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
snake_case : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
snake_case : Optional[int] = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : List[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : int = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ ,tensor_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
snake_case : int = target_sizes.numpy()
snake_case : Optional[Any] = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
snake_case : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
snake_case : Tuple = logits.argmax(dim=1 )
snake_case : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
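# Aside: a minimal numpy sketch (illustrative, channels-last input) of what
# `flip_channel_order` does -- RGB <-> BGR is just a reversed channel axis:
#     rgb = np.zeros((2, 2, 3), dtype=np.uint8)
#     rgb[..., 0] = 255                # a pure-red image
#     bgr = rgb[..., ::-1]             # reverse the channel axis
#     assert bgr[..., 2].max() == 255  # red now sits in the last (BGR) slot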
| 36
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase( lowerCamelCase ):
raise NotImplementedError()
@abstractmethod
def UpperCamelCase( self ):
raise NotImplementedError()
| 368
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
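# A minimal usage sketch (hypothetical optimizer and schedule, for illustration):
#     optimizer = AdamW(model.parameters(), lr=10.0)
#     scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=4)
#     lrs = unwrap_schedule(scheduler, num_steps=10)
# `unwrap_and_save_reload_schedule` records the same trace but round-trips the
# scheduler state dict through disk halfway, so the two traces should match.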
@require_torch
class OptimizationTest(unittest.TestCase):
    '''simple docstring'''
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    '''simple docstring'''
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # scheduler dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=F'''failed for {scheduler_func} in normal scheduler''', )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=F'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
    '''simple docstring'''
    def __init__(self, fn):
        self.fn = fn
    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)
    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 368
| 1
|
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int):
    '''simple docstring'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list):
    '''simple docstring'''
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
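# Worked trace for [5, 1, 4, 2] (illustrative): with p=2 the runs (5, 1) and
# (4, 2) merge into [1, 5, 2, 4]; p*2 >= 4 then triggers the final merge of
# [1, 5] with [2, 4], yielding [1, 2, 4, 5].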
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
| 97
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
    # not used in modeling files, but it's important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
    # used during training (even though we don't have a training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
    # `norm` used in conversion script (despite not being used in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    '''simple docstring'''
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
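# Illustration (hypothetical modeling snippet) of the multi-line case the
# regex above catches, which the plain substring checks would miss:
#     value = getattr(
#         self.config, "chunk_length", None
#     )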
def check_config_attributes_being_used(config_class):
    '''simple docstring'''
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    '''simple docstring'''
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 203
| 0
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _lowercase :
def __init__( self , _UpperCAmelCase = "cpu" , _UpperCAmelCase = "openai/clip-vit-large-patch14" ):
A : List[Any] = device
A : Optional[int] = CLIPTokenizerFast.from_pretrained(_UpperCAmelCase )
A : Dict = [0.48145466, 0.4578275, 0.40821073]
A : List[str] = [0.26862954, 0.26130258, 0.27577711]
A : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
A : Dict = torchvision.transforms.Resize(224 )
A : List[str] = torchvision.transforms.CenterCrop(224 )
def snake_case ( self , _UpperCAmelCase ):
A : Any = self.resize(_UpperCAmelCase )
A : Optional[int] = self.center_crop(_UpperCAmelCase )
A : List[Any] = self.normalize(_UpperCAmelCase )
return images
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
A : Any = self.tokenizer(text=_UpperCAmelCase , **_UpperCAmelCase )
A : List[str] = self.preprocess_img(_UpperCAmelCase )
A : Any = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _lowercase ( nn.Module ):
def __init__( self , _UpperCAmelCase=10 , _UpperCAmelCase=0.01 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase="image" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , ):
super().__init__()
A : Any = None
A : Dict = device if device else get_device()
if vqgan:
A : Union[str, Any] = vqgan
else:
A : List[str] = load_vqgan(self.device , conf_path=_UpperCAmelCase , ckpt_path=_UpperCAmelCase )
self.vqgan.eval()
if clip:
A : str = clip
else:
A : Tuple = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
A : Dict = ProcessorGradientFlow(device=self.device )
A : Optional[Any] = iterations
A : str = lr
A : Dict = log
A : Optional[Any] = make_grid
A : Union[str, Any] = return_val
A : Tuple = quantize
A : Optional[Any] = self.vqgan.decoder.z_shape
def snake_case ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=5 , _UpperCAmelCase=True ):
A : Any = []
if output_path is None:
A : List[str] = '''./animation.gif'''
if input_path is None:
A : Union[str, Any] = self.save_path
A : Optional[int] = sorted(glob(input_path + '''/*''' ) )
if not len(_UpperCAmelCase ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(_UpperCAmelCase ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
A : Optional[int] = total_duration / len(_UpperCAmelCase )
A : str = [frame_duration] * len(_UpperCAmelCase )
if extend_frames:
A : Any = 1.5
A : Optional[int] = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(_UpperCAmelCase ) )
imageio.mimsave(_UpperCAmelCase , _UpperCAmelCase , duration=_UpperCAmelCase )
print(f'''gif saved to {output_path}''' )
def snake_case ( self , _UpperCAmelCase=None , _UpperCAmelCase=None ):
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
A : Tuple = preprocess(Image.open(_UpperCAmelCase ) , target_image_size=256 ).to(self.device )
A : List[str] = preprocess_vqgan(_UpperCAmelCase )
A, *A : Dict = self.vqgan.encode(_UpperCAmelCase )
return z
def snake_case ( self , _UpperCAmelCase ):
A : int = self.latent.detach().requires_grad_()
A : List[str] = base_latent + transform_vector
if self.quantize:
A, *A : List[str] = self.vqgan.quantize(_UpperCAmelCase )
else:
A : Any = trans_latent
return self.vqgan.decode(_UpperCAmelCase )
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
A : List[str] = self.clip_preprocessor(text=_UpperCAmelCase , images=_UpperCAmelCase , return_tensors='''pt''' , padding=_UpperCAmelCase )
A : int = self.clip(**_UpperCAmelCase )
A : Dict = clip_outputs.logits_per_image
if weights is not None:
A : Optional[Any] = similarity_logits * weights
return similarity_logits.sum()
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
A : List[Any] = self._get_clip_similarity(pos_prompts['''prompts'''] , _UpperCAmelCase , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
A : str = self._get_clip_similarity(neg_prompts['''prompts'''] , _UpperCAmelCase , weights=neg_prompts['''weights'''] )
else:
A : Union[str, Any] = torch.tensor([1] , device=self.device )
A : Optional[int] = -torch.log(_UpperCAmelCase ) + torch.log(_UpperCAmelCase )
return loss
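    # The value returned above is a log-ratio loss: minimizing
    # -log(pos_similarity) + log(neg_similarity) pulls the decoded image
    # toward the positive prompts and away from the negative ones (the
    # negative term is a constant log(1) = 0 when no negatives are given).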
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
A : Optional[int] = torch.randn_like(self.latent , requires_grad=_UpperCAmelCase , device=self.device )
A : Optional[int] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
A : List[Any] = self._add_vector(_UpperCAmelCase )
A : str = loop_post_process(_UpperCAmelCase )
A : Optional[int] = self._get_CLIP_loss(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
print('''CLIP loss''' , _UpperCAmelCase )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=_UpperCAmelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
wandb.init(reinit=_UpperCAmelCase , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
A : Optional[Any] = Image.open(_UpperCAmelCase )
A : Dict = image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(_UpperCAmelCase ) )
def snake_case ( self , _UpperCAmelCase ):
if not prompts:
return []
A : Optional[int] = []
A : int = []
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
A : Tuple = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(_UpperCAmelCase , (tuple, list) ):
A : List[str] = prompt[0]
A : Any = float(prompt[1] )
elif ":" in prompt:
A, A : Optional[int] = prompt.split(''':''' )
A : Optional[int] = float(_UpperCAmelCase )
else:
A : int = prompt
A : List[str] = 1.0
processed_prompts.append(_UpperCAmelCase )
weights.append(_UpperCAmelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(_UpperCAmelCase , device=self.device ),
}
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=None , ):
if image_path:
A : List[str] = self._get_latent(_UpperCAmelCase )
else:
A : List[Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
assert pos_prompts, "You must provide at least one positive prompt."
A : Optional[int] = self.process_prompts(_UpperCAmelCase )
A : str = self.process_prompts(_UpperCAmelCase )
if save_final and save_path is None:
A : List[Any] = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(_UpperCAmelCase ):
os.makedirs(_UpperCAmelCase )
else:
A : Union[str, Any] = save_path + '''_''' + get_timestamp()
os.makedirs(_UpperCAmelCase )
A : Optional[int] = save_path
A : Dict = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(_UpperCAmelCase ) )
A : List[Any] = loop_post_process(_UpperCAmelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) ):
if show_intermediate:
show_pil(_UpperCAmelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(_UpperCAmelCase )} )
if show_final:
show_pil(_UpperCAmelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
| 537
|
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class _lowercase ( a , unittest.TestCase ):
_UpperCamelCase = XLMProphetNetTokenizer
_UpperCamelCase = False
_UpperCamelCase = True
def snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
A : Dict = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
A : Union[str, Any] = '''[PAD]'''
A : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def snake_case ( self ):
A : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_UpperCAmelCase ) , 1_012 )
def snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def snake_case ( self ):
A : Dict = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
A : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
A : int = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
A : Union[str, Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def snake_case ( self ):
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def snake_case ( self ):
A : Union[str, Any] = '''Hello World!'''
A : Any = [35_389, 6_672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def snake_case ( self ):
# fmt: off
A : List[Any] = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 537
| 1
|
'''simple docstring'''
import cv2
import numpy as np
class HarrisCorner:
    '''simple docstring'''
    def __init__(self, k, window_size):
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')
    def __str__(self):
        """simple docstring"""
        return str(self.k)
    def detect(self, img_path):
        """simple docstring"""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
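# The response computed in `detect` is the classic Harris measure
#     r = det(M) - k * trace(M)**2,  det(M) = wxx * wyy - wxy**2,  trace(M) = wxx + wyy
# where wxx, wyy and wxy are window sums of the squared gradients; responses
# above the (tunable) 0.5 threshold are marked as corners in red.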
| 365
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
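# Installed as the `accelerate` console entry point, so for example
#     accelerate launch train.py
# dispatches to the sub-parser registered by `launch_command_parser` above.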
| 365
| 1
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( _lowerCAmelCase ,unittest.TestCase ):
A = None
A = BloomTokenizerFast
A = BloomTokenizerFast
A = True
A = False
A = "tokenizer_file"
A = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
lowerCamelCase_ : Optional[Any] = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : Optional[int] , **UpperCamelCase_ : str ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[str] = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowerCamelCase_ : List[str] = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
lowerCamelCase_ : str = tokenizer.batch_encode_plus(UpperCamelCase__ )['''input_ids''']
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __UpperCamelCase ( self : List[Any] , UpperCamelCase_ : Tuple=6 ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowerCamelCase_ : Tuple = '''This is a simple input'''
lowerCamelCase_ : int = ['''This is a simple input 1''', '''This is a simple input 2''']
lowerCamelCase_ : Optional[Any] = ('''This is a simple input''', '''This is a pair''')
lowerCamelCase_ : Any = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__ )
tokenizer_r.encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__ )
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__ )
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__ )
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowerCamelCase_ : str = None # Hotfixing padding = None
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = self.get_rust_tokenizer()
lowerCamelCase_ : int = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCamelCase__ )
lowerCamelCase_ : List[Any] = next(iter(UpperCamelCase__ ) )['''premise'''] # pick up one data
lowerCamelCase_ : str = list(sample_data.values() )
lowerCamelCase_ : Tuple = list(map(tokenizer.encode , UpperCamelCase__ ) )
lowerCamelCase_ : int = [tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 700
|
'''simple docstring'''
def __snake_case (a, b):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
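    # Worked example: bin(25) -> 11001 and bin(32) -> 100000; zfill pads both
    # to a common width and each column ORs the bits, giving 0b111001 (57).
    assert __snake_case(25, 32) == "0b111001"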
| 418
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : Any ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Optional[int] , **__a : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
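# Each dummy class in this file mirrors a real torch-backed class and, in the
# un-obfuscated source, uses the `DummyObject` metaclass imported above: any
# instantiation or `from_config`/`from_pretrained` call raises a clear
# "requires torch" error instead of a bare NameError when torch is missing.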
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : str , *__a : Union[str, Any] , **__a : List[str] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *__a : str , **__a : List[str] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *__a : str , **__a : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Any , **__a : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : List[Any] , **__a : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : int , **__a : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : List[Any] , *__a : List[str] , **__a : Tuple ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : List[str] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *__a : Tuple , **__a : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[str] , **__a : str ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : str , **__a : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : Tuple , **__a : Tuple ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Any , *__a : Dict , **__a : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : int , *__a : Tuple , **__a : Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *__a : int , **__a : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *__a : List[Any] , **__a : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : Optional[Any] , **__a : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *__a : List[Any] , **__a : int ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Any , *__a : Any , **__a : Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : int , **__a : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : Any , **__a : int ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : List[str] , *__a : List[Any] , **__a : str ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Tuple , **__a : List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : Tuple ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : Dict , **__a : int ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : Any , **__a : Any ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
def a_ ( *_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : Any ) -> int:
requires_backends(_UpperCAmelCase ,['torch'] )
def a_ ( *_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : str ) -> Optional[Any]:
requires_backends(_UpperCAmelCase ,['torch'] )
def a_ ( *_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : Optional[int] ) -> str:
requires_backends(_UpperCAmelCase ,['torch'] )
def a_ ( *_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : int ) -> Dict:
requires_backends(_UpperCAmelCase ,['torch'] )
def a_ ( *_UpperCAmelCase : int ,**_UpperCAmelCase : List[str] ) -> int:
requires_backends(_UpperCAmelCase ,['torch'] )
def a_ ( *_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : str ) -> int:
requires_backends(_UpperCAmelCase ,['torch'] )
def a_ ( *_UpperCAmelCase : Optional[Any] ,**_UpperCAmelCase : Optional[int] ) -> List[Any]:
requires_backends(_UpperCAmelCase ,['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : int , **__a : List[Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : Any , **__a : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : int , **__a : Any ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : List[Any] , *__a : Tuple , **__a : List[str] ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : List[Any] , **__a : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : int , **__a : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[int] , *__a : str , **__a : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *__a : List[str] , **__a : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Dict , *__a : int , **__a : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *__a : Any , **__a : str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : int , *__a : int , **__a : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : Optional[Any] , **__a : str ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *__a : Tuple , **__a : Tuple ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[int] , **__a : List[str] ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *__a : List[str] , **__a : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *__a : int , **__a : Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : List[str] , *__a : Tuple , **__a : int ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : str , **__a : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *__a : List[str] , **__a : Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Dict , *__a : Optional[int] , **__a : Dict ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Dict , *__a : str , **__a : Any ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : int ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *__a : int , **__a : Tuple ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : Optional[int] , **__a : Tuple ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : int , *__a : Optional[int] , **__a : Dict ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : int , **__a : int ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Dict , **__a : Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Any , *__a : Any , **__a : List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : Dict , **__a : Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Dict ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Dict , *__a : Tuple , **__a : Dict ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Dict , *__a : Tuple , **__a : Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Optional[int] , **__a : int ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Dict , *__a : List[str] , **__a : Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *__a : Union[str, Any] , **__a : Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : str , *__a : str , **__a : str ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : Dict , **__a : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *__a : List[str] , **__a : Any ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[int] , *__a : Dict , **__a : Dict ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Dict , *__a : Any , **__a : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *__a : int , **__a : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : List[Any] , **__a : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : Dict , **__a : int ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Dict , *__a : Optional[Any] , **__a : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Dict , *__a : Dict , **__a : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Dict , *__a : str , **__a : int ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : Any , **__a : str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : Optional[int] , **__a : Tuple ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Tuple , *__a : List[Any] , **__a : List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Dict , *__a : List[Any] , **__a : Tuple ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : str , *__a : int , **__a : Any ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Any , *__a : Dict , **__a : List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Any , **__a : Tuple ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Any , *__a : Union[str, Any] , **__a : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : str , *__a : List[Any] , **__a : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *__a : Tuple , **__a : List[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : List[str] , **__a : str ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[int] , *__a : Dict , **__a : Any ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *__a : Optional[Any] , **__a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Tuple , **__a : Any ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : int ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : Dict , **__a : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : List[Any] , *__a : str , **__a : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : int , **__a : List[str] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : int , **__a : Dict ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : Dict ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *__a : Optional[Any] , **__a : List[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : List[Any] , **__a : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : Dict ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : str , **__a : Any ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *__a : Tuple , **__a : int ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : Any , **__a : Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : Dict , **__a : List[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : int , *__a : str , **__a : List[str] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : Union[str, Any] , **__a : Tuple ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : Optional[Any] , **__a : str ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[int] , *__a : int , **__a : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *__a : Optional[Any] , **__a : Optional[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : Optional[Any] , **__a : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : int , **__a : Dict ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *__a : Dict , **__a : Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Tuple ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[int] , *__a : Dict , **__a : List[Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *__a : str , **__a : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : Dict , **__a : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : int ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *__a : Optional[Any] , **__a : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
A__ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *__a : List[Any] , **__a : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : int , *__a : Any , **__a : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
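# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original file): the stubs above follow
# the "dummy object" pattern used for optional backends. Each placeholder class
# raises a helpful ImportError the moment it is used, instead of failing at
# import time. A minimal, self-contained sketch of the idea follows; the names
# `_SketchDummyObject` and `_sketch_requires_backends` mirror the real helpers
# only loosely, and the error text is illustrative.
class _SketchDummyObject(type):
    """Metaclass so that even attribute access on the class itself fails."""

    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the 'torch' backend to be installed.")


def _sketch_requires_backends(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class _SketchTorchOnlyModel(metaclass=_SketchDummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        _sketch_requires_backends(self, ["torch"])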
| 286
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
        """Write a toy vocab and merges file for the tokenizer under test."""
        super().setUp()
        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs) -> BlenderbotSmallTokenizer:
        """Build a tokenizer from the toy files written in ``setUp``."""
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text
    def test_full_tokenizer(self) -> None:
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self) -> None:
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        assert tok('sam').input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text], padding=False, truncation=True)['input_ids']
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self) -> None:
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text)['input_ids']
        encoded_dot = tok(src_text_dot)['input_ids']
        assert encoded[-1] == encoded_dot[0]
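# Hedged sketch (not from the original test file): the toy `merges` written in
# ``setUp`` drive a standard BPE loop, which is why "apte" tokenizes to
# ["ap@@", "te"]: the merge `a p` produces `ap` and `t e</w>` produces the
# word-final `te`. A tiny stand-alone merge loop over one word, assuming
# rank-ordered merge pairs (illustrative only):
def _sketch_bpe(word: str, ranked_merges: list) -> list:
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {tuple(m.split()): i for i, m in enumerate(ranked_merges)}
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        i = pairs.index(best)
        symbols[i : i + 2] = ["".join(best)]
    return symbols


assert _sketch_bpe("apte", ["a p", "t e</w>", "ap t</w>"]) == ["ap", "te</w>"]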
| 286
| 1
|
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    """Return the binomial coefficient C(n, k)."""
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees with ``node_count`` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n! for a non-negative integer n."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible binary trees with ``node_count`` nodes."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
F"binary trees and {catalan_number(node_count)} binary search trees."
)
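# Quick sanity check of the formulas above: the Catalan numbers count binary
# search trees, and multiplying by n! counts binary trees. For n = 3:
# C(6, 3) // 4 = 20 // 4 = 5 BSTs, and 5 * 3! = 30 binary trees.
def _sketch_tree_counts():
    assert binomial_coefficient(6, 3) == 20
    assert catalan_number(3) == 5
    assert binary_tree_count(3) == 30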
| 304
|
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no counterpart in the HF model."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build an output projection whose weights are tied to the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint and map it onto an XGLMForCausalLM."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
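# Hedged illustration (not part of the original script): `make_linear_from_emb`
# ties the output projection to the embedding matrix, so logits are dot
# products against the embedding rows and no extra parameters are allocated.
def _sketch_check_weight_tying():
    emb = nn.Embedding(10, 4)
    lm_head = make_linear_from_emb(emb)
    assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # shared storage
    assert tuple(lm_head.weight.shape) == (10, 4)  # (vocab_size, emb_size)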
| 304
| 1
|
import re
def split_input(str_):
    """Split the input string into words on any non-alphanumeric separator."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    """Convert the input string to PascalCase."""
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text, upper, separator):
    """Join the words of the input string with ``separator``, upper- or lower-cased."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ] )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text):
    """Convert the input string to PascalCase."""
    return to_simple_case(text)


def to_camel_case(text):
    """Convert the input string to camelCase."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    """Convert the input string to snake_case (or SNAKE_CASE when ``upper``)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    """Convert the input string to kebab-case (or KEBAB-CASE when ``upper``)."""
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
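# Example usage of the converters above (illustrative; matches the behavior of
# the implementations as written):
def _demo_case_conversions():
    assert to_pascal_case("hello world") == "HelloWorld"
    assert to_camel_case("hello world") == "helloWorld"
    assert to_snake_case("hello world", upper=False) == "hello_world"
    assert to_kebab_case("hello world", upper=True) == "HELLO-WORLD"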
| 551
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip_2'] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
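# Hedged sketch (not from the original file): the `_import_structure` /
# `_LazyModule` pattern above defers heavy imports until an attribute is first
# touched. Stripped to its essence, a lazy module is a module subclass with a
# custom __getattr__; names here are illustrative, not the library's code.
import importlib
import types


class _SketchLazyModule(types.ModuleType):
    """Minimal stand-in for transformers' _LazyModule (illustrative only)."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map "public name" -> "submodule that defines it"
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        if item not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        submodule = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(submodule, item)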
| 101
| 0
|
'''simple docstring'''
def gnome_sort(lst):
    """Sort ``lst`` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
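# A quick trace of the algorithm above (illustrative): starting from [3, 1, 2],
# the gnome walks right while the order holds and swaps-and-steps-back when it
# does not: [3, 1, 2] -> [1, 3, 2] -> [1, 2, 3]. Worst case is O(n^2) swaps.
def _demo_gnome_sort():
    assert gnome_sort([3, 1, 2]) == [1, 2, 3]
    assert gnome_sort([]) == []
    assert gnome_sort([1]) == [1]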
| 695
|
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def get_tokenizer(self, mname) -> FSMTTokenizer:
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname) -> FSMTForConditionalGeneration:
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score) -> None:
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
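# Hedged note (not part of the original test): `calculate_bleu` comes from the
# local `utils` module, which is not shown here. A common minimal helper on top
# of sacrebleu looks like this sketch; the repository's actual helper may differ.
def _sketch_calculate_bleu(output_lns, refs_lns):
    import sacrebleu  # assumed dependency for this sketch only

    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}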
| 695
| 1
|
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pre-train."""

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={'help': 'Whether to log verbose messages or not.'}, )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={'help': 'Maximum temperature for gumbel softmax.'} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={'help': 'Minimum temperature for gumbel softmax.'} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={'help': 'Decay of gumbel temperature during training.'} )
def configure_logger(model_args, training_args):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for pre-training."""

    dataset_name: str = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_split_name: Optional[str] = field(
        default='train', metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        }, )
    validation_split_name: Optional[str] = field(
        default='validation', metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        }, )
    speech_file_column: Optional[str] = field(
        default='file', metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''}, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Dynamically pad the received audio inputs and sample the masked time indices."""

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
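# Hedged illustration (not part of the original collator): the
# flip -> cumsum -> flip trick above turns a "1 at the last valid index" marker
# row into a full boolean prefix mask. For one row of length 6 with output
# length 4:
#   marker:            [0, 0, 0, 1, 0, 0]
#   flip:              [0, 0, 1, 0, 0, 0]
#   cumsum:            [0, 0, 1, 1, 1, 1]
#   flip back (bool):  [1, 1, 1, 1, 0, 0]  -> positions 0..3 are attended to
def _sketch_prefix_mask_demo():
    marker = torch.tensor([[0, 0, 0, 1, 0, 0]])
    mask = marker.flip([-1]).cumsum(-1).flip([-1]).bool()
    assert mask.tolist() == [[True, True, True, True, False, False]]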
class Wav2Vec2PreTrainer(Trainer):
    """Subclass of ``Trainer`` that decays the gumbel softmax temperature during training."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) )
        return loss.detach()
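# Hedged illustration (not part of the original trainer): the temperature
# schedule used above is a simple exponential decay clamped at a floor,
#   temp(step) = max(max_temp * decay**step, min_temp),
# so with max_temp=2.0 and decay=0.999995 it takes roughly 139k steps to halve.
def _sketch_gumbel_schedule(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)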
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""", cache_dir=model_args.cache_dir, )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"""{data_args.train_split_name}""", cache_dir=model_args.cache_dir, )
    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, )
    trainer.train()
if __name__ == "__main__":
main()
| 82
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 5
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Any = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
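# Hedged usage example (not part of the original file): with the defaults above,
# the channel dimension after the last stage is embed_dim * 2**(num_stages - 1),
# i.e. 96 * 2**3 = 768 for depths=[2, 2, 6, 2]:
def _sketch_donut_swin_hidden_size():
    config = DonutSwinConfig()
    assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1) == 768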
| 713
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count lamina sizes L <= t_limit that can be formed in 1..n_limit distinct ways."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'''{solution() = }''')
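# Worked example for the counting loop above: the smallest lamina is a 3x3
# square with a 1x1 hole, using 3*3 - 1*1 = 8 tiles. Some tile totals arise
# from several (outer, hole) pairs, which is what `count` tallies, e.g. 32
# tiles can be laid out as 6x6 minus 2x2 or as 9x9 minus 7x7:
def _sketch_laminae_examples():
    assert 3 * 3 - 1 * 1 == 8
    assert 6 * 6 - 2 * 2 == 32
    assert 9 * 9 - 7 * 7 == 32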
| 118
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 420
|
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 420
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 228
|
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((5_12, 5_12))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((5_12, 5_12))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
        assert np.abs(expected_image - image).max() < 9e-2
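# Hedged sketch (not from the original tests): `control_guidance_start` /
# `control_guidance_end` window the ControlNet residuals to a fraction of the
# denoising schedule. Per step, diffusers derives a keep factor roughly like:
def _sketch_control_guidance_keeps(num_steps, start, end):
    # 1.0 while the step falls inside [start, end] of the schedule, else 0.0
    return [
        1.0 - float(i / num_steps < start or (i + 1) / num_steps > end)
        for i in range(num_steps)
    ]


# e.g. with 4 steps, start=0.0 and end=0.5 keep the ControlNet active only for
# the first half of the schedule:
#     _sketch_control_guidance_keeps(4, 0.0, 0.5) == [1.0, 1.0, 0.0, 0.0]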
| 228
| 1
|
__all__ = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 686
|
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCAmelCase = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__UpperCAmelCase = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__UpperCAmelCase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation ( datasets.Metric ):
def _info( self ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
def _compute( self , predictions , references , sample_weight=None ) -> Union[str, Any]:
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
}
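# Hedged usage sketch, mirroring Example 1 from the docstring above: the metric is a
# thin wrapper, so the wrapped sklearn call can be exercised directly.
# >>> from sklearn.metrics import matthews_corrcoef
# >>> round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2)
# 0.54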
| 642
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowercase = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
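# Hedged sketch of what the _LazyModule pattern buys callers (module path assumed for
# illustration): the heavy submodule import is deferred until first attribute access.
#   from transformers.models import byt5   # cheap: only the stub above runs
#   tok_cls = byt5.ByT5Tokenizer           # the real tokenization module is imported here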
| 526
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def snake_case_ ( *a__ , **a__):
pass
def hashimage(image: Image.Image) -> str:
m = hashlib.md5(image.tobytes() )
return m.hexdigest()
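# Minimal sketch of why the helper exists: identical pixel data hashes identically, so
# pipeline outputs can be compared by digest instead of by full array.
#   hashimage(Image.new("RGB", (2, 2))) == hashimage(Image.new("RGB", (2, 2)))  # True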
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def get_test_pipeline( self , model , tokenizer , processor):
depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def run_pipeline_test( self , depth_estimator , examples):
outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)} , outputs)
import datasets
dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''')
outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
])
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
] , outputs , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''')
def snake_case_ ( self):
pass
@slow
@require_torch
def snake_case_ ( self):
model_id = '''Intel/dpt-large'''
depth_estimator = pipeline('''depth-estimation''' , model=model_id)
outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
outputs['''depth'''] = hashimage(outputs['''depth'''])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()) , 2_9.3_0_4)
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()) , 2.6_6_2)
@require_torch
def snake_case_ ( self):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
| 526
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig ( PretrainedConfig ):
model_type = 'mvp'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ) -> None:
"""simple docstring"""
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
self.use_prompt = use_prompt
self.prompt_length = prompt_length
self.prompt_mid_dim = prompt_mid_dim
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
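# Hedged usage sketch: with the defaults above, the attribute_map declared on the class
# aliases the generic config names onto the MVP-specific ones.
#   cfg = MvpConfig()
#   cfg.hidden_size          # -> 1024, resolved via attribute_map to d_model
#   cfg.num_attention_heads  # -> 16, resolved to encoder_attention_heads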
| 464
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor ( ProcessorMixin ):
'''simple docstring'''
attributes = ["image_processor", "tokenizer"]
image_processor_class = "BridgeTowerImageProcessor"
tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , image_processor , tokenizer ):
"""simple docstring"""
super().__init__(image_processor , tokenizer )
def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
"""simple docstring"""
encoding = self.tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
# add pixel_values + pixel_mask
encoding_image_processor = self.image_processor(
images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
encoding.update(encoding_image_processor )
return encoding
def batch_decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
"""simple docstring"""
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
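# Hedged usage sketch (checkpoint id is illustrative): one call merges text and image
# features into a single BatchEncoding.
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   batch = processor(images=pil_image, text="a photo of a cat", return_tensors="pt")
#   # tokenizer keys (input_ids, attention_mask, ...) plus image keys (pixel_values, pixel_mask)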
| 529
| 0
|
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
__a : Union[str, Any] = """pytorch_model.bin"""
__a : Any = """pytorch_model.bin.index.json"""
__a : Optional[Any] = """adapter_config.json"""
__a : str = """adapter_model.bin"""
__a : int = """adapter_model.safetensors"""
__a : Any = """tf_model.h5"""
__a : Any = """tf_model.h5.index.json"""
__a : Optional[int] = """model.ckpt"""
__a : Optional[Any] = """flax_model.msgpack"""
__a : Optional[Any] = """flax_model.msgpack.index.json"""
__a : Optional[int] = """model.safetensors"""
__a : Dict = """model.safetensors.index.json"""
__a : str = """config.json"""
__a : str = """preprocessor_config.json"""
__a : str = FEATURE_EXTRACTOR_NAME
__a : Optional[Any] = """generation_config.json"""
__a : List[str] = """modelcard.json"""
__a : List[str] = """▁"""
__a : List[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
__a : int = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
__a : Union[str, Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
__a : Optional[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version( min_version ) -> None:
'''simple docstring'''
if version.parse(__version__ ) < version.parse(min_version ):
if "dev" in min_version:
error_message = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
error_message = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
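# Hedged usage sketch (version strings are illustrative): example scripts call the
# guard once at import time.
#   check_min_version("4.21.0")        # no-op if the installed version is new enough
#   check_min_version("4.21.0.dev0")   # the "dev" branch above asks for a source install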
| 717
|
def z_function( s ) -> list[int]:
'''simple docstring'''
z_result = [0 for i in range(len(s ) )]
# initialize interval's left pointer and right pointer
left_pointer , right_pointer = 0, 0
for i in range(1 , len(s ) ):
# case when current index is inside the interval
if i <= right_pointer:
min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
z_result[i] = min_edge
while go_next(i , z_result , s ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
left_pointer , right_pointer = i, i + z_result[i] - 1
return z_result
def go_next( i , z_result , s ) -> bool:
'''simple docstring'''
return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern( pattern , input_str ) -> int:
'''simple docstring'''
answer = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
z_result = z_function(pattern + input_str )
for val in z_result:
# if value is greater than length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(pattern ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
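# Worked example: z_function("aba" + "abacaba") only reaches len("aba") == 3 at the two
# genuine match positions, so the count below is 2.
print(find_pattern("aba", "abacaba"))  # -> 2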
| 559
| 0
|
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester :
def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.encoder_stride = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 2
def prepare_config_and_inputs( self ):
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = DeiTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = DeiTForMaskedImageModeling(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase = 1
lowercase = DeiTForMaskedImageModeling(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase = model(__lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.type_sequence_label_size
lowercase = DeiTForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase = 1
lowercase = DeiTForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class DeiTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
snake_case__ : Optional[Any] = False
snake_case__ : Union[str, Any] = False
snake_case__ : str = False
def A__ ( self ):
"""simple docstring"""
self.model_tester = DeiTModelTester(self )
self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def A__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
"""simple docstring"""
lowercase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
lowercase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
lowercase = model(**__lowerCAmelCase ).loss
loss.backward()
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase = False
lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase = model_class(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCAmelCase )
model.train()
lowercase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
lowercase = model(**__lowerCAmelCase ).loss
loss.backward()
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCAmelCase ),
*get_values(__lowerCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
lowercase = problem_type["""title"""]
lowercase = problem_type["""num_labels"""]
lowercase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
lowercase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if problem_type["num_labels"] > 1:
lowercase = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCAmelCase ) as warning_list:
lowercase = model(**__lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def A__ ( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = DeiTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def A__ ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
__lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**__lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
lowercase = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A__ ( self ):
"""simple docstring"""
lowercase = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" )
lowercase = inputs.pixel_values.to(__lowerCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase = model(__lowerCAmelCase )
| 359
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester :
def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_stages = num_stages
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_labels = num_labels
self.initializer_range = initializer_range
self.out_features = out_features
self.out_indices = out_indices
self.scope = scope
def prepare_config_and_inputs( self ):
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = ConvNextModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = ConvNextForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = ConvNextBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase = None
lowercase = ConvNextBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : List[str] = True
snake_case__ : Dict = False
snake_case__ : Optional[int] = False
snake_case__ : Tuple = False
snake_case__ : List[str] = False
def A__ ( self ):
"""simple docstring"""
self.model_tester = ConvNextModelTester(self )
self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )
def A__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lowercase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = ConvNextModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def UpperCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def A__ ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**__lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
lowercase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
@require_torch
class ConvNextBackboneTest ( unittest.TestCase , BackboneTesterMixin ):
all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
config_class = ConvNextConfig
has_attentions = False
def setUp( self ):
"""simple docstring"""
self.model_tester = ConvNextModelTester(self )
| 359
| 1
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_DESCRIPTION = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_KWARGS_DESCRIPTION = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve ( datasets.Metric ):
def _info( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ) -> str:
out = compute_mauve(
p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
return out
| 714
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance( a , b ):
"""simple docstring"""
return np.linalg.norm(np.array(a ) - np.array(b ) )
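# Worked example: euclidean_distance([0, 0], [3, 4]) -> 5.0 (the 3-4-5 right triangle).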
def classifier( train_data , train_target , classes , point , k=5 ):
"""simple docstring"""
data = zip(train_data , train_target )
# List of distances of all points from the point to be classified
distances = []
for data_point in data:
distance = euclidean_distance(data_point[0] , point )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
votes = [i[1] for i in sorted(distances )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
result = Counter(votes ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
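# Note (hedged): the sample point above has short petals (1.3 x 1.4), so the printed
# class is almost always "setosa"; the exact result depends on the random train/test split.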
| 279
| 0
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester ( ConfigTester ):
'''simple docstring'''
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
config = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(config , "embed_dim" ) )
self.parent.assertTrue(hasattr(config , "num_heads" ) )
class TFCvtModelTester :
'''simple docstring'''
def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1e-12 , is_training=True , use_labels=True , num_labels=2 , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_sizes = patch_sizes
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.is_training = is_training
self.use_labels = use_labels
self.num_labels = num_labels
self.num_channels = num_channels
self.embed_dim = embed_dim
self.num_heads = num_heads
self.stride_kv = stride_kv
self.depth = depth
self.cls_token = cls_token
self.attention_drop_rate = attention_drop_rate
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
def prepare_config_and_inputs( self ):
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
# create a random int32 tensor of given shape
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def create_and_check_model( self , config , pixel_values , labels ):
'''simple docstring'''
model = TFCvtModel(config=config )
result = model(pixel_values , training=False )
image_size = (self.image_size, self.image_size)
height , width = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def UpperCamelCase ( self : str , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : List[Any] = TFCvtForImageClassification(snake_case__ )
UpperCAmelCase__ : int = model(snake_case__ , labels=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
pipeline_model_mapping = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowercase_ : Tuple = False
lowercase_ : int = False
lowercase_ : Optional[Any] = False
lowercase_ : List[Any] = False
lowercase_ : Optional[int] = False
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
self.model_tester = TFCvtModelTester(self )
self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
def UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def UpperCamelCase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def UpperCamelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(snake_case__ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def UpperCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(snake_case__ )
UpperCAmelCase__ : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Any = [*signature.parameters.keys()]
UpperCAmelCase__ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def UpperCamelCase ( self : int ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
UpperCAmelCase__ : Dict = model_class(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
UpperCAmelCase__ : Optional[Any] = outputs.hidden_states
UpperCAmelCase__ : Optional[int] = len(self.model_tester.depth )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[int] = TFCvtModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def snake_case_ ( ):
UpperCAmelCase__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head(self ):
        '''simple docstring'''
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 199
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(1_0)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int" )
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0" )
    # Convert the number to a string to iterate over its digits and sum their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number) )
def solution(chain_length: int = 6_0 , number_limit: int = 1_0_0_0_0_0_0 ) -> int:
    if not isinstance(chain_length, int ) or not isinstance(number_limit, int ):
        raise TypeError("Parameters chain_length and number_limit must be int" )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0" )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains the exact number of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
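# Worked check (an added sketch, not part of the original solution): 145 is a
# "factorion" -- it equals the sum of the factorials of its own digits -- so
# its digit-factorial chain repeats immediately.
assert digit_factorial_sum(145 ) == 1 + 24 + 120  # 1! + 4! + 5! == 145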
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
| 199
| 1
|
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker ):
    """Skips output comparison when the IGNORE_RESULT flag is set."""
    def check_output(self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
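# Illustrative sketch (an addition, not part of the original conftest): with
# the flag registered above, an individual doctest can opt out of output
# checking via the standard doctest directive syntax:
#
#     >>> import datetime
#     >>> datetime.datetime.now()   # doctest: +IGNORE_RESULT
#     datetime.datetime(2023, 1, 1, 0, 0)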
| 712
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(model , model_args: tuple , output_path: Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models(model_path: str , output_path: str , opset: int , fp16: bool = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = """cuda"""
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
    else:
        device = """cpu"""
    pipeline = StableDiffusionPipeline.from_pretrained(model_path , torch_dtype=dtype ).to(device )
    output_path = Path(output_path )
# TEXT ENCODER
__a = pipeline.text_encoder.config.max_position_embeddings
__a = pipeline.text_encoder.config.hidden_size
__a = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowerCAmelCase__ , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=lowerCAmelCase__ , )
del pipeline.text_encoder
# UNET
__a = pipeline.unet.config.in_channels
__a = pipeline.unet.config.sample_size
__a = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
torch.randn(2 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
torch.randn(2 , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=lowerCAmelCase__ , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , )
__a = str(unet_path.absolute().as_posix() )
__a = os.path.dirname(lowerCAmelCase__ )
__a = onnx.load(lowerCAmelCase__ )
# clean up existing tensor files
shutil.rmtree(lowerCAmelCase__ )
os.mkdir(lowerCAmelCase__ )
# collate external tensor files into one
onnx.save_model(
lowerCAmelCase__ , lowerCAmelCase__ , save_as_external_data=lowerCAmelCase__ , all_tensors_to_one_file=lowerCAmelCase__ , location="""weights.pb""" , convert_attribute=lowerCAmelCase__ , )
del pipeline.unet
# VAE ENCODER
__a = pipeline.vae
__a = vae_encoder.config.in_channels
__a = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
__a = lambda lowerCAmelCase__ , lowerCAmelCase__ : vae_encoder.encode(lowerCAmelCase__ , lowerCAmelCase__ )[0].sample()
onnx_export(
lowerCAmelCase__ , model_args=(
torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=lowerCAmelCase__ , )
# VAE DECODER
__a = pipeline.vae
__a = vae_decoder.config.latent_channels
__a = vae_decoder.config.out_channels
# forward only through the decoder part
__a = vae_encoder.decode
onnx_export(
lowerCAmelCase__ , model_args=(
torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=lowerCAmelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
__a = pipeline.safety_checker
__a = safety_checker.config.vision_config.num_channels
__a = safety_checker.config.vision_config.image_size
__a = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=lowerCAmelCase__ , )
del pipeline.safety_checker
__a = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
__a = pipeline.feature_extractor
else:
__a = None
__a = None
__a = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(lowerCAmelCase__ )
print("""ONNX pipeline saved to""" , lowerCAmelCase__ )
del pipeline
del onnx_pipeline
__a = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase__ , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
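# Follow-up sketch (an addition, not part of the original script; the output
# directory name below is hypothetical): the exported folder can be reloaded
# for CPU inference, mirroring the loadability check performed above.
#
# pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd_onnx", provider="CPUExecutionProvider")
# image = pipe("a photo of an astronaut riding a horse").images[0]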
| 209
| 0
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray ) -> bool:
    '''Checks whether a matrix equals its own conjugate transpose.'''
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray ) -> Any:
    '''Returns the Rayleigh quotient v* A v / (v* v) of a Hermitian matrix a.'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a , v ) == float(3 )
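# Note (an addition, not in the original module): for a Hermitian matrix the
# Rayleigh quotient of any nonzero vector lies between the smallest and
# largest eigenvalue, which is why it serves as an eigenvalue estimate. For
# the identity matrix it is exactly 1 for every nonzero v:
# rayleigh_quotient(np.eye(3), np.array([[1], [2], [3]]))  # -> 1.0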
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 606
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__a : Optional[Any] = """facebook/wmt19-en-de"""
__a : Union[str, Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__a : Optional[int] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
__a : Union[str, Any] = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
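# Consumption sketch (illustrative, not part of the original script): after
# the upload, the tiny checkpoint loads like any hub model.
#
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
# tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")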
| 606
| 1
|
'''simple docstring'''
import math
def solution(n: int = 100 ):
    sum_of_squares = sum(i * i for i in range(1, n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
    return square_of_sum - sum_of_squares
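# Equivalent closed form (an added sketch, not part of the original solution),
# using sum(1..n) = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6:
def solution_closed_form(n: int = 100 ):
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares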
if __name__ == "__main__":
print(f'''{solution() = }''')
| 540
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    '''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=3 , __lowercase=4 , __lowercase=None , ):
"""simple docstring"""
__A : Optional[int] = parent
__A : Tuple = batch_size
__A : Optional[int] = seq_length
__A : Tuple = is_training
__A : Optional[Any] = use_input_mask
__A : Optional[Any] = use_token_type_ids
__A : Optional[int] = use_labels
__A : str = vocab_size
__A : Dict = hidden_size
__A : Tuple = num_hidden_layers
__A : Optional[int] = num_attention_heads
__A : str = intermediate_size
__A : List[Any] = hidden_act
__A : List[str] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : int = max_position_embeddings
__A : int = type_vocab_size
__A : int = type_sequence_label_size
__A : str = initializer_range
__A : str = num_labels
__A : str = num_choices
__A : Any = scope
def snake_case__ ( self ):
"""simple docstring"""
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Union[str, Any] = None
if self.use_input_mask:
__A : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__A : str = None
if self.use_token_type_ids:
__A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Union[str, Any] = None
__A : Optional[int] = None
__A : List[str] = None
if self.use_labels:
__A : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__A : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , use_stable_embedding=__lowercase , )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
__A : List[str] = OpenLlamaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__A : Any = model(__lowercase , attention_mask=__lowercase )
__A : Dict = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
"""simple docstring"""
__A : List[str] = True
__A : int = OpenLlamaModel(__lowercase )
model.to(__lowercase )
model.eval()
__A : List[Any] = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__A : Optional[Any] = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , )
__A : Optional[Any] = model(__lowercase , attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
"""simple docstring"""
__A : Dict = OpenLlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
__A : Optional[Any] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
"""simple docstring"""
__A : List[Any] = True
__A : Optional[int] = True
__A : Dict = OpenLlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
__A : List[str] = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , use_cache=__lowercase , )
__A : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__A : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__A : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
__A : Tuple = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_hidden_states=__lowercase , )['hidden_states'][0]
__A : Any = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )['hidden_states'][0]
# select random slice
__A : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-3 ) )
def snake_case__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
def snake_case__ ( self ):
"""simple docstring"""
__A : Optional[int] = OpenLlamaModelTester(self )
__A : List[str] = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def snake_case__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self ):
"""simple docstring"""
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : List[str] = type
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = 3
__A : int = input_dict['input_ids']
__A : int = input_ids.ne(1 ).to(__lowercase )
__A : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[Any] = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Optional[int] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = 3
__A : List[str] = 'single_label_classification'
__A : Dict = input_dict['input_ids']
__A : Dict = input_ids.ne(1 ).to(__lowercase )
__A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Union[str, Any] = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Dict = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = 3
__A : int = 'multi_label_classification'
__A : Union[str, Any] = input_dict['input_ids']
__A : str = input_ids.ne(1 ).to(__lowercase )
__A : Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : int = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Any = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def snake_case__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
__A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[Any] = ids_tensor([1, 10] , config.vocab_size )
__A : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : Union[str, Any] = OpenLlamaModel(__lowercase )
original_model.to(__lowercase )
original_model.eval()
__A : Optional[int] = original_model(__lowercase ).last_hidden_state
__A : int = original_model(__lowercase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : List[Any] = {'type': scaling_type, 'factor': 1_0.0}
__A : str = OpenLlamaModel(__lowercase )
scaled_model.to(__lowercase )
scaled_model.eval()
__A : Dict = scaled_model(__lowercase ).last_hidden_state
__A : List[str] = scaled_model(__lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
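# Illustrative sketch (an addition, not part of the original test file): the
# rope_scaling dict exercised by the parameterized test above is passed
# straight through the model config, e.g.:
#
# config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
# model = OpenLlamaModel(config)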
| 540
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list used for 0-1 BFS shortest paths."""
    def __init__(self , size: int ):
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__(self , vertex: int ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )
    @property
    def size(self ):
        return self._size
    def add_edge(self , from_vertex: int , to_vertex: int , weight: int ):
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""" )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""" )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path(self , start_vertex: int , finish_vertex: int ) -> int | None:
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""" )
        return distances[finish_vertex]
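# Usage sketch (an addition, not in the original module): the 0-weight edges
# let BFS reach vertex 3 at total distance 1.
# g = AdjacencyList(4)
# g.add_edge(0, 1, 0); g.add_edge(1, 2, 1); g.add_edge(2, 3, 0)
# g.get_shortest_path(0, 3)  # -> 1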
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63
|
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string):
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
A__ : str = input("""Enter numbers separated by a comma:\n""").strip()
A__ : List[str] = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
| 233
| 0
|
def lucas_lehmer_test(p: int ) -> bool:
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
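# Context note (an addition, not in the original module): lucas_lehmer_test(p)
# decides primality of the Mersenne number 2**p - 1 for an odd prime p; the
# calls below print True for p = 7 (127 is prime) and False for p = 11
# (2047 == 23 * 89).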
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 709
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self ):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self , tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_chinese(self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
    def test_basic_tokenizer_lower(self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false(self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
    def test_basic_tokenizer_lower_strip_accents_true(self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_default(self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_no_lower(self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_false(self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_true(self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_respects_never_split_tokens(self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_wordpiece_tokenizer(self ):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
    @require_torch
    def test_prepare_batch(self ):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text , padding=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace(self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    @slow
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
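# Illustrative round-trip sketch (an addition, not part of the original test
# file; requires hub access):
#
# tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
# ids = tokenizer.encode('sequence builders')  # ends with the [SEP] id 102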
| 77
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
_UpperCamelCase : List[str] = TypeVar('T')
_UpperCamelCase : int = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U] ):
    def __init__(self , key: T | None , val: U | None ):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self : List[Any] ) -> Dict:
return (
F"Node: key: {self.key}, val: {self.val}, "
F"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class DoubleLinkedList(Generic[T, U] ):
    def __init__(self ):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self ) -> str:
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n    ".join(rep )
    def add(self , node: DoubleLinkedListNode[T, U] ) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove(self , node: DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U] ):
    # class-level registry mapping each decorated function to its cache instance
    decorator_function_to_instance_map = {}
    def __init__(self , capacity: int ):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : str ) -> Optional[Any]:
return (
F"CacheInfo(hits={self.hits}, misses={self.miss}, "
F"capacity={self.capacity}, current size={self.num_keys})"
)
    def __contains__(self , key: T ) -> bool:
        return key in self.cache
    def get(self , key: T ) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put(self , key: T , value: U ) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )
    @classmethod
    def decorator(cls , size: int = 1_28 ):
        def cache_decorator_inner(func: Callable[[T], U] ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , '''cache_info''' , cache_info )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
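# Usage sketch (an addition, not part of the original module):
#
# @LRUCache.decorator(100)
# def fib(num: int) -> int:
#     return num if num < 2 else fib(num - 1) + fib(num - 2)
#
# fib(30)
# print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)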
if __name__ == "__main__":
import doctest
doctest.testmod()
| 541
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowerCAmelCase__: Optional[int] = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowerCAmelCase__: Any = "UperNetConfig"
class snake_case_ ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0 , __lowerCAmelCase = False , __lowerCAmelCase = 1 , ):
super().__init__()
SCREAMING_SNAKE_CASE_ : str = nn.Convad(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=__lowerCAmelCase , padding=__lowerCAmelCase , bias=__lowerCAmelCase , dilation=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.BatchNormad(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.ReLU()
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict = self.conv(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.batch_norm(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.activation(__lowerCAmelCase )
return output
class snake_case_ ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Tuple = [
nn.AdaptiveAvgPoolad(__lowerCAmelCase ),
UperNetConvModule(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__lowerCAmelCase ) , __lowerCAmelCase )
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = input
for layer in self.layers:
SCREAMING_SNAKE_CASE_ : Tuple = layer(__lowerCAmelCase )
return hidden_state
class snake_case_ ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Any = pool_scales
SCREAMING_SNAKE_CASE_ : List[Any] = align_corners
SCREAMING_SNAKE_CASE_ : List[str] = in_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = channels
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i, pool_scale in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = UperNetPyramidPoolingBlock(pool_scale=__lowerCAmelCase , in_channels=__lowerCAmelCase , channels=__lowerCAmelCase )
self.blocks.append(__lowerCAmelCase )
self.add_module(str(__lowerCAmelCase ) , __lowerCAmelCase )
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for ppm in self.blocks:
SCREAMING_SNAKE_CASE_ : List[Any] = ppm(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.functional.interpolate(
__lowerCAmelCase , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(__lowerCAmelCase )
return ppm_outs
class snake_case_ ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[int] = config
SCREAMING_SNAKE_CASE_ : Dict = config.pool_scales # e.g. (1, 2, 3, 6)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = in_channels
SCREAMING_SNAKE_CASE_ : Optional[int] = config.hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Dict = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
SCREAMING_SNAKE_CASE_ : List[str] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
SCREAMING_SNAKE_CASE_ : Tuple = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
SCREAMING_SNAKE_CASE_ : Tuple = nn.ModuleList()
SCREAMING_SNAKE_CASE_ : str = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
SCREAMING_SNAKE_CASE_ : Optional[int] = UperNetConvModule(__lowerCAmelCase , self.channels , kernel_size=1 )
SCREAMING_SNAKE_CASE_ : List[str] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__lowerCAmelCase )
self.fpn_convs.append(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def __A ( self ):
self.apply(self._init_weights )
def __A ( self , __lowerCAmelCase ):
if isinstance(__lowerCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = inputs[-1]
SCREAMING_SNAKE_CASE_ : str = [x]
psp_outs.extend(self.psp_modules(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : List[str] = torch.cat(__lowerCAmelCase , dim=1 )
SCREAMING_SNAKE_CASE_ : Tuple = self.bottleneck(__lowerCAmelCase )
return output
def __A ( self , __lowerCAmelCase ):
# build laterals
SCREAMING_SNAKE_CASE_ : List[str] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__lowerCAmelCase ) )
# build top-down path
SCREAMING_SNAKE_CASE_ : str = len(__lowerCAmelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = laterals[i - 1].shape[2:]
SCREAMING_SNAKE_CASE_ : List[str] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__lowerCAmelCase , mode='bilinear' , align_corners=self.align_corners )
# build outputs
SCREAMING_SNAKE_CASE_ : Dict = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE_ : Tuple = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
SCREAMING_SNAKE_CASE_ : List[str] = torch.cat(__lowerCAmelCase , dim=1 )
SCREAMING_SNAKE_CASE_ : Tuple = self.fpn_bottleneck(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = self.classifier(__lowerCAmelCase )
return output
class snake_case_ ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = 2 , __lowerCAmelCase = 3 , __lowerCAmelCase = 1 ):
super().__init__()
SCREAMING_SNAKE_CASE_ : str = config
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config.auxiliary_in_channels
SCREAMING_SNAKE_CASE_ : Dict = config.auxiliary_channels
SCREAMING_SNAKE_CASE_ : Tuple = config.auxiliary_num_convs
SCREAMING_SNAKE_CASE_ : int = config.auxiliary_concat_input
SCREAMING_SNAKE_CASE_ : Dict = in_index
SCREAMING_SNAKE_CASE_ : Optional[Any] = (kernel_size // 2) * dilation
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__lowerCAmelCase , padding=__lowerCAmelCase , dilation=__lowerCAmelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__lowerCAmelCase , padding=__lowerCAmelCase , dilation=__lowerCAmelCase ) )
if self.num_convs == 0:
SCREAMING_SNAKE_CASE_ : List[Any] = nn.Identity()
else:
SCREAMING_SNAKE_CASE_ : int = nn.Sequential(*__lowerCAmelCase )
if self.concat_input:
SCREAMING_SNAKE_CASE_ : List[Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__lowerCAmelCase , padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE_ : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def __A ( self ):
self.apply(self._init_weights )
def __A ( self , __lowerCAmelCase ):
if isinstance(__lowerCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __A ( self , __lowerCAmelCase ):
# just take the relevant feature maps
SCREAMING_SNAKE_CASE_ : Any = encoder_hidden_states[self.in_index]
SCREAMING_SNAKE_CASE_ : Any = self.convs(__lowerCAmelCase )
if self.concat_input:
SCREAMING_SNAKE_CASE_ : str = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
SCREAMING_SNAKE_CASE_ : List[str] = self.classifier(__lowerCAmelCase )
return output
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : List[str] = UperNetConfig
__lowerCamelCase : str = 'pixel_values'
__lowerCamelCase : Union[str, Any] = True
def __A ( self , __lowerCAmelCase ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def __A ( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def __A ( self , __lowerCAmelCase , __lowerCAmelCase=False ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = value
lowerCAmelCase__: Optional[Any] = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowerCAmelCase__: List[str] = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , lowerCAmelCase , )
class snake_case_ ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
super().__init__(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
SCREAMING_SNAKE_CASE_ : Optional[Any] = UperNetHead(__lowerCAmelCase , in_channels=self.backbone.channels )
SCREAMING_SNAKE_CASE_ : int = UperNetFCNHead(__lowerCAmelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC )
def __A ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Optional[int] = output_attentions if output_attentions is not None else self.config.output_attentions
SCREAMING_SNAKE_CASE_ : Optional[int] = self.backbone.forward_with_filtered_kwargs(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , output_attentions=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = outputs.feature_maps
SCREAMING_SNAKE_CASE_ : List[Any] = self.decode_head(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = nn.functional.interpolate(__lowerCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.auxiliary_head is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.auxiliary_head(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.functional.interpolate(
__lowerCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
SCREAMING_SNAKE_CASE_ : List[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
SCREAMING_SNAKE_CASE_ : List[Any] = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : List[Any] = (logits,) + outputs[1:]
else:
SCREAMING_SNAKE_CASE_ : Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
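# Inference sketch (an addition, not part of the original modeling file;
# assumes the public transformers API for this model and a PIL `image`):
#
# from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
# processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
# model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
# outputs = model(**processor(images=image, return_tensors="pt"))  # logits: (1, num_labels, H, W)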
| 345
| 0
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
_lowerCamelCase = None
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: "extra_id_" in str(x), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: re.search(r"<extra_id_\d+>", token) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
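# Standalone sketch of the sentinel-token filtering performed by `get_sentinel_tokens`
# above; the token list here is illustrative.
if __name__ == "__main__":
    _tokens = ["<extra_id_0>", "<extra_id_1>", "<pad>", "<extra_id_2>"]
    _sentinels = [t for t in _tokens if re.search(r"<extra_id_\d+>", t) is not None]
    print(_sentinels)  # ['<extra_id_0>', '<extra_id_1>', '<extra_id_2>']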
| 701
|
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
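# The jitted-versus-eager agreement check in `test_jit_compilation` generalizes to any
# pure function; a minimal sketch with a toy function in place of the model (the
# function and shapes here are illustrative).
if __name__ == "__main__" and is_flax_available():
    import jax
    import jax.numpy as jnp

    @jax.jit
    def _forward(x):
        return jnp.tanh(x) * 2.0

    _x = jnp.ones((2, 4))
    _jit_out = _forward(_x)
    with jax.disable_jit():
        _eager_out = _forward(_x)
    assert _jit_out.shape == _eager_out.shape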
| 401
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
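# The shape assertions above all verify the same property: GLPN-style resizing snaps
# each spatial dimension down to a multiple of `size_divisor`. A toy sketch of that
# rounding (32 mirrors the tester default; 400x273 is an arbitrary input size):
if __name__ == "__main__":
    def _round_down_to_multiple(size, divisor):
        return (size // divisor) * divisor

    _h, _w = _round_down_to_multiple(400, 32), _round_down_to_multiple(273, 32)
    assert _h % 32 == 0 and _w % 32 == 0
    print(_h, _w)  # 384 256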
| 55
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # ctypes layout of the Win32 CONSOLE_CURSOR_INFO structure
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
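# Usage sketch for the context manager above: the cursor stays hidden for the duration
# of the block and is restored even if the body raises. (`hidden_cursor` is a
# reconstructed name for the context manager in the listing above.)
if __name__ == "__main__":
    import time

    with hidden_cursor():
        for _step in range(3):
            print(f"working... step {_step}")
            time.sleep(0.1)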
| 247
| 0
|
'''simple docstring'''
def combination_sum_iv(n: int, array: list, target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
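# All three variants above should agree. For array=[1, 2, 5] and target=5 there are
# nine ordered combinations: 1+1+1+1+1; 1+1+1+2 in four orders; 1+2+2 in three
# orders; and 5 itself.
if __name__ == "__main__":
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )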
| 701
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return n with n = r1 (mod n1) and n = r2 (mod n2), for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
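# Worked example: find n with n = 1 (mod 5) and n = 3 (mod 7). Both solvers above
# return 31, the unique solution modulo 35 (31 = 6*5 + 1 = 4*7 + 3).
if __name__ == "__main__":
    print(chinese_remainder_theorem(5, 1, 7, 3))   # 31
    print(chinese_remainder_theorem2(5, 1, 7, 3))  # 31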
| 340
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class a_ ( a ):
A__ : torch.FloatTensor
class a_ ( a , a ):
@register_to_config
def __init__( self : Any , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 20 , UpperCAmelCase__ : int = 768 , UpperCAmelCase__ : Union[str, Any]=77 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : str = "silu" , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = "linear" , UpperCAmelCase__ : Optional[str] = "prd" , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , ):
"""simple docstring"""
super().__init__()
snake_case : int = num_attention_heads
snake_case : List[str] = attention_head_dim
snake_case : Union[str, Any] = num_attention_heads * attention_head_dim
snake_case : str = additional_embeddings
snake_case : Union[str, Any] = time_embed_dim or inner_dim
snake_case : Optional[int] = embedding_proj_dim or embedding_dim
snake_case : Optional[Any] = clip_embed_dim or embedding_dim
snake_case : Union[str, Any] = Timesteps(UpperCAmelCase__ , UpperCAmelCase__ , 0 )
snake_case : str = TimestepEmbedding(UpperCAmelCase__ , UpperCAmelCase__ , out_dim=UpperCAmelCase__ , act_fn=UpperCAmelCase__ )
snake_case : str = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
if embedding_proj_norm_type is None:
snake_case : Dict = None
elif embedding_proj_norm_type == "layer":
snake_case : Dict = nn.LayerNorm(UpperCAmelCase__ )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
snake_case : Optional[Any] = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
if encoder_hid_proj_type is None:
snake_case : Any = None
elif encoder_hid_proj_type == "linear":
snake_case : str = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
snake_case : Union[str, Any] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCAmelCase__ ) )
if added_emb_type == "prd":
snake_case : Dict = nn.Parameter(torch.zeros(1 , 1 , UpperCAmelCase__ ) )
elif added_emb_type is None:
snake_case : Tuple = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
snake_case : Any = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , dropout=UpperCAmelCase__ , activation_fn='''gelu''' , attention_bias=UpperCAmelCase__ , )
for d in range(UpperCAmelCase__ )
] )
if norm_in_type == "layer":
snake_case : str = nn.LayerNorm(UpperCAmelCase__ )
elif norm_in_type is None:
snake_case : str = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
snake_case : int = nn.LayerNorm(UpperCAmelCase__ )
snake_case : List[Any] = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : int = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
snake_case : Tuple = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , UpperCAmelCase__ , persistent=UpperCAmelCase__ )
snake_case : Dict = nn.Parameter(torch.zeros(1 , UpperCAmelCase__ ) )
snake_case : str = nn.Parameter(torch.zeros(1 , UpperCAmelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase( self : int ):
"""simple docstring"""
snake_case : List[str] = {}
def fn_recursive_add_processors(UpperCAmelCase__ : str , UpperCAmelCase__ : torch.nn.Module , UpperCAmelCase__ : Dict[str, AttentionProcessor] ):
if hasattr(UpperCAmelCase__ , '''set_processor''' ):
snake_case : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , UpperCAmelCase__ , UpperCAmelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return processors
def lowerCAmelCase( self : int , UpperCAmelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
"""simple docstring"""
snake_case : int = len(self.attn_processors.keys() )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(UpperCAmelCase__ )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(UpperCAmelCase__ : str , UpperCAmelCase__ : torch.nn.Module , UpperCAmelCase__ : Any ):
if hasattr(UpperCAmelCase__ , '''set_processor''' ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
module.set_processor(UpperCAmelCase__ )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , UpperCAmelCase__ , UpperCAmelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[torch.Tensor, float, int] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.BoolTensor] = None , UpperCAmelCase__ : bool = True , ):
"""simple docstring"""
snake_case : Union[str, Any] = hidden_states.shape[0]
snake_case : Union[str, Any] = timestep
if not torch.is_tensor(UpperCAmelCase__ ):
snake_case : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCAmelCase__ ) and len(timesteps.shape ) == 0:
snake_case : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case : Any = timesteps * torch.ones(UpperCAmelCase__ , dtype=timesteps.dtype , device=timesteps.device )
snake_case : List[str] = self.time_proj(UpperCAmelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
snake_case : List[Any] = timesteps_projected.to(dtype=self.dtype )
snake_case : Optional[int] = self.time_embedding(UpperCAmelCase__ )
if self.embedding_proj_norm is not None:
snake_case : int = self.embedding_proj_norm(UpperCAmelCase__ )
snake_case : Union[str, Any] = self.embedding_proj(UpperCAmelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
snake_case : Union[str, Any] = self.encoder_hidden_states_proj(UpperCAmelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
snake_case : Optional[Any] = self.proj_in(UpperCAmelCase__ )
snake_case : str = self.positional_embedding.to(hidden_states.dtype )
snake_case : Optional[int] = []
snake_case : str = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCAmelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
snake_case : Tuple = hidden_states[:, None, :]
snake_case : List[str] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
snake_case : List[Any] = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCAmelCase__ , -1 , -1 )
additional_embeds.append(UpperCAmelCase__ )
snake_case : Optional[int] = torch.cat(
UpperCAmelCase__ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
snake_case : int = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
snake_case : Optional[int] = F.pad(
UpperCAmelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
snake_case : List[str] = hidden_states + positional_embeddings
if attention_mask is not None:
snake_case : Tuple = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
snake_case : int = F.pad(UpperCAmelCase__ , (0, self.additional_embeddings) , value=0.0 )
snake_case : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
snake_case : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
snake_case : Dict = self.norm_in(UpperCAmelCase__ )
for block in self.transformer_blocks:
snake_case : Any = block(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
snake_case : int = self.norm_out(UpperCAmelCase__ )
if self.prd_embedding is not None:
snake_case : Optional[int] = hidden_states[:, -1]
else:
snake_case : Optional[int] = hidden_states[:, additional_embeddings_len:]
snake_case : Optional[int] = self.proj_to_clip_embeddings(UpperCAmelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : int ):
"""simple docstring"""
snake_case : Optional[int] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
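# Standalone sketch of how the additive causal mask registered in `__init__` above is
# built: -10000.0 strictly above the diagonal blocks attention to future positions,
# while zeros elsewhere leave visible positions untouched (4 tokens for illustration).
if __name__ == "__main__":
    _mask = torch.full([4, 4], -10000.0)
    _mask.triu_(1)            # keep -10000.0 only strictly above the diagonal
    _mask = _mask[None, ...]  # broadcastable batch dimension
    print(_mask)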
| 598
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : str = logging.get_logger(__name__)
class a_ ( a ):
def __init__( self : List[str] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Tuple ):
"""simple docstring"""
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 598
| 1
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Graph vertex with a key, a parent pointer (pi) and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors (a and b are 1-indexed into graph):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex):
    """Prim's algorithm with a plain list as the priority queue: O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex):
    """Prim's algorithm with a binary heap: O(E log V). Yields MST edges."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder for doctest-driven checks."""
if __name__ == "__main__":
import doctest
doctest.testmod()
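# Usage sketch for the listing above: a 4-vertex graph (ids 0..3; `connect` takes
# 1-indexed endpoints), with edge weights chosen so the MST is the path 1-2-3-4.
if __name__ == "__main__":
    graph = [Vertex(i) for i in range(4)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 3, 4, 1)
    connect(graph, 1, 4, 5)
    print(prim(graph, graph[0]))             # [(2, 1), (3, 2), (4, 3)]
    print(list(prim_heap(graph, graph[0])))  # same edges via the heap variant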
| 163
|
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 163
| 1
|
def solution(limit: int = 1000000) -> int:
    """Return the starting number below `limit` with the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
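# The `counters` dict memoizes chain lengths across starting values; an equivalent
# recursive formulation with functools.lru_cache. The iterative version above avoids
# deep call stacks, but the memoized suffixes keep recursion shallow here.
if __name__ == "__main__":
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def chain_length(n: int) -> int:
        return 1 if n == 1 else 1 + chain_length(n // 2 if n % 2 == 0 else 3 * n + 1)

    print(max(range(2, 10000), key=chain_length))  # 6171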
| 693
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums`."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
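# Worked example: in [3, 2, 7, 10] the best non-adjacent picks are 3 and 10,
# beating 2 + 10 = 12 and 3 + 7 = 10.
if __name__ == "__main__":
    print(maximum_non_adjacent_sum([3, 2, 7, 10]))  # 13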
| 241
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
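# Programmatic equivalent of the CLI above (all three paths are illustrative):
#
#     convert_tf_checkpoint_to_pytorch(
#         "bert_model.ckpt", "bert_config.json", "pytorch_model.bin"
#     )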
| 318
|
'''simple docstring'''
_lowerCAmelCase = "Input must be a string of 8 numbers plus letter"
_lowerCAmelCase = "TRWAGMYFPDXBNJZSQVHLCKE"
def _lowerCAmelCase ( lowercase : str ) ->bool:
"""simple docstring"""
if not isinstance(lowercase , lowercase ):
lowercase__ = F'''Expected string as input, found {type(lowercase ).__name__}'''
raise TypeError(lowercase )
lowercase__ = spanish_id.replace('''-''' , '''''' ).upper()
if len(lowercase ) != 9:
raise ValueError(lowercase )
try:
lowercase__ = int(spanish_id_clean[0:8] )
lowercase__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowercase ) from ex
if letter.isdigit():
raise ValueError(lowercase )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
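# Worked examples: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so "12345678Z"
# validates; dashes are stripped before checking.
if __name__ == "__main__":
    print(is_spain_national_id("12345678Z"))   # True
    print(is_spain_national_id("12345678-Z"))  # True
    print(is_spain_national_id("12345678A"))   # False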
| 318
| 1
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 98
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
_lowerCAmelCase : List[str] = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_lowerCAmelCase : Tuple = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_lowerCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
_lowerCAmelCase : Optional[Any] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __magic_name__ ( pl.LightningModule ):
def __init__( self , __snake_case , __snake_case=None , __snake_case="base" , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__snake_case )
__a =0
__a =Path(self.hparams.output_dir )
__a =self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__a =AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__snake_case , **__snake_case , )
else:
__a =config
__a =('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __snake_case , __snake_case ):
assert hasattr(self.config , __snake_case ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __snake_case , getattr(self.hparams , __snake_case ) )
if tokenizer is None:
__a =AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__snake_case , )
else:
__a =tokenizer
__a =MODEL_MODES[mode]
if model is None:
__a =self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__snake_case , )
else:
__a =model
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> int:
'''simple docstring'''
__a =self.model_type.from_pretrained(*__snake_case , **__snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =arg_to_scheduler[self.hparams.lr_scheduler]
__a =get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__a ={'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =self.model
__a =['bias', 'LayerNorm.weight']
__a =[
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__a =Adafactor(
__snake_case , lr=self.hparams.learning_rate , scale_parameter=__snake_case , relative_step=__snake_case )
else:
__a =AdamW(
__snake_case , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__a =optimizer
__a =self.get_lr_scheduler()
return [optimizer], [scheduler]
def __magic_name__ ( self , __snake_case , __snake_case ) -> List[str]:
'''simple docstring'''
return self.validation_step(__snake_case , __snake_case )
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
return self.validation_end(__snake_case )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__a =self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
if stage == "test":
__a =len(self.test_dataloader().dataset )
else:
__a =self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__snake_case )
__a =len(self.train_dataloader().dataset )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = False ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return self.train_loader
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__snake_case , list(filter(__snake_case , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __magic_name__ ( self , __snake_case ) -> None:
'''simple docstring'''
__a =self.output_dir.joinpath('best_tfmr' )
__a =self.step_count
self.model.save_pretrained(__snake_case )
self.tokenizer.save_pretrained(__snake_case )
@staticmethod
def __magic_name__ ( __snake_case , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__snake_case , type=__snake_case , required=__snake_case , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__snake_case , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__snake_case , type=__snake_case , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__snake_case ).parent / 'test_run' / 'cache' ) , type=__snake_case , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__snake_case , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__snake_case , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__snake_case , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__snake_case , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5e-5 , type=__snake_case , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__snake_case , metavar=__snake_case , type=__snake_case , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__snake_case , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=__snake_case , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__snake_case , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__snake_case , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__snake_case )
parser.add_argument('--train_batch_size' , default=32 , type=__snake_case )
parser.add_argument('--eval_batch_size' , default=32 , type=__snake_case )
parser.add_argument('--adafactor' , action='store_true' )
class __magic_name__ ( pl.Callback ):
def __magic_name__ ( self , __snake_case , __snake_case ) -> str:
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __magic_name__ ( pl.Callback ):
def __magic_name__ ( self , __snake_case , __snake_case ) -> Any:
'''simple docstring'''
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__snake_case )
class __magic_name__ ( pl.Callback ):
def __magic_name__ ( self , __snake_case , __snake_case ) -> int:
'''simple docstring'''
__a =trainer.lr_schedulers[0]['scheduler']
__a ={f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case ) -> List[str]:
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
__a =trainer.callback_metrics
# Log results
for key in sorted(__snake_case ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__snake_case , str(metrics[key] ) ) )
def __magic_name__ ( self , __snake_case , __snake_case ) -> List[str]:
'''simple docstring'''
rank_zero_info('***** Test results *****' )
__a =trainer.callback_metrics
# Log and save results to file
__a =os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__snake_case , 'w' ) as writer:
for key in sorted(__snake_case ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__snake_case , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__snake_case , str(metrics[key] ) ) )
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(_snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=_snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=_snake_case , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=_snake_case )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=_snake_case , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=_snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=_snake_case , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(_snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=_snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def UpperCamelCase_( _snake_case : BaseTransformer , _snake_case : argparse.Namespace , _snake_case : Union[str, Any]=None , _snake_case : List[Any]=True , _snake_case : Dict=[] , _snake_case : List[str]=None , _snake_case : Any=None , **_snake_case : str , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__a =Path(model.hparams.output_dir )
odir.mkdir(exist_ok=_snake_case )
# add custom checkpoints
if checkpoint_callback is None:
__a =pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(_snake_case )
if logging_callback is None:
__a =LoggingCallback()
__a ={}
    if args.fp16:
__a =16
if args.gpus > 1:
__a ='auto'
__a ='ddp'
__a =args.accumulate_grad_batches
__a =None
__a ='auto'
__a =pl.Trainer.from_argparse_args(
_snake_case , weights_summary=_snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **_snake_case , )
if args.do_train:
trainer.fit(_snake_case )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
return trainer
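# The effective-batch-size arithmetic in `total_steps` above, as a standalone check
# (the concrete numbers are illustrative):
if __name__ == "__main__":
    train_batch_size, accumulate_grad_batches, num_devices = 32, 2, 4
    dataset_size, max_epochs = 10_000, 3
    effective_batch_size = train_batch_size * accumulate_grad_batches * num_devices
    print(effective_batch_size)                                # 256
    print((dataset_size / effective_batch_size) * max_epochs)  # 117.1875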
| 242
| 0
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
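# `collate_fn` above pads to a fixed length on TPU (static shapes avoid recompilation)
# and to the longest sequence in the batch otherwise. A toy sketch of that branching,
# with plain lists standing in for tokenizer output:
if __name__ == "__main__":
    def _pad_batch(sequences, pad_value=0, static_length=None):
        target = static_length or max(len(s) for s in sequences)
        return [s + [pad_value] * (target - len(s)) for s in sequences]

    _batch = [[1, 2, 3], [4, 5]]
    print(_pad_batch(_batch))                   # [[1, 2, 3], [4, 5, 0]]
    print(_pad_batch(_batch, static_length=8))  # both rows padded to width 8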
| 714
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 456
| 0
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_initialization(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_gradient_checkpointing_backward_compatibility(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
"""simple docstring"""
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 93
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
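# Sanity check of the mapping above: for i = 0 the first pair produced is
#   ("encoder.deit.blocks.0.norm1.weight", "encoder.encoder.layer.0.layernorm_before.weight")
# i.e. every original DeiT parameter name is paired with its HuggingFace ViT
# counterpart, and `rename_key` below moves the tensor from one name to the other.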
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
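# --- Hedged usage sketch (not part of the original script) ---
# After the conversion runs, the dumped folder loads back like any other
# VisionEncoderDecoder checkpoint. `./trocr-base-handwritten` is a hypothetical
# output path chosen for illustration:
#
#     from transformers import TrOCRProcessor, VisionEncoderDecoderModel
#
#     processor = TrOCRProcessor.from_pretrained("./trocr-base-handwritten")
#     model = VisionEncoderDecoderModel.from_pretrained("./trocr-base-handwritten")
#     pixel_values = processor(images=prepare_img("handwritten"), return_tensors="pt").pixel_values
#     generated_ids = model.generate(pixel_values)
#     print(processor.batch_decode(generated_ids, skip_special_tokens=True))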
| 576
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
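# Worked example: with the MoVQ scale factor of 8, a requested 768x768 image
# gives 768 // 8**2 = 12 with no remainder, so the function returns
# (12 * 8, 12 * 8) = (96, 96) -- the latent grid size. A non-divisible request
# such as 700 rounds up: 700 // 64 = 10 with a remainder, so 11 * 8 = 88.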
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
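    # Worked example: with num_inference_steps=100 and strength=0.3,
    # init_timestep = min(int(100 * 0.3), 100) = 30 and t_start = 70, so only
    # the final 30 scheduler timesteps are run -- a smaller strength keeps the
    # output closer to the input image.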
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 701
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 404
| 0
|
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
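# Worked example of the recursion: binary_recursive(11) unwinds as
#   divmod(11, 2) = (5, 1) -> divmod(5, 2) = (2, 1) -> divmod(2, 2) = (1, 0)
# and the base case returns "1", so the digits assemble to "1" + "0" + "1" + "1"
# = "1011"; main("-11") therefore returns "-0b1011".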
| 10
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution():
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
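# Worked example: 9 = 7 + 2*1**2 and 15 = 7 + 2*2**2, so both odd composites
# satisfy Goldbach's other conjecture and are skipped by the `else` branch.
# solution() returns the first odd composite with no such decomposition
# (5777, the known counterexample from Project Euler problem 46).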
| 463
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)  # EXAMPLE_DOC_STRING: module-level example defined earlier in this file
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        r"""
        Function invoked when calling the pipeline for generation. `image_embeds` and
        `negative_image_embeds` come from the prior pipeline; `hint` is the controlnet
        conditioning tensor (e.g. a depth map).
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # the unet stacks the noise prediction and a (learned) variance along the channel dim
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing: decode the latents back to pixel space with the MoVQ decoder
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
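
# A minimal usage sketch for the pipeline above. This is an assumption-heavy
# illustration, not part of the original file: it presumes the class is the
# Kandinsky 2.2 controlnet decoder pipeline from diffusers, that the listed Hub
# repo ids exist, and that `depth_hint` is a conditioning tensor of shape
# (batch, 3, H, W) prepared by the caller (e.g. a depth map).
#
#   import torch
#   from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
#
#   prior = KandinskyV22PriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   pipe = KandinskyV22ControlnetPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
#   ).to("cuda")
#   image_embeds, negative_image_embeds = prior("a robot, 4k photo").to_tuple()
#   image = pipe(
#       image_embeds=image_embeds,
#       negative_image_embeds=negative_image_embeds,
#       hint=depth_hint,
#       num_inference_steps=50,
#   ).images[0]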
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of references, one for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\'. Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=["https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve"],
        )

    def _compute(
        self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None,
        num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500,
        featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024,
        divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25,
    ):
        # delegate to the official implementation from the mauve-text package
        out = compute_mauve(
            p_text=predictions, q_text=references,
            p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens,
            num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
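
# A minimal usage sketch, mirroring the docstring example above (an assumption:
# this file is registered as the `mauve` metric so `datasets.load_metric` can
# resolve it, and the `mauve-text` package is installed):
#
#   import datasets
#
#   mauve = datasets.load_metric("mauve")
#   out = mauve.compute(
#       predictions=["hello there", "general kenobi"],
#       references=["hello there", "general kenobi"],
#       featurize_model_name="gpt2",  # smaller featurizer than the "gpt2-large" default
#       device_id=0,  # use the first GPU; falls back to CPU if no GPU with this id is found
#   )
#   print(out.mauve, out.frontier_integral)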