| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
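
A quick usage sketch for the list above (illustrative only; it uses the cleaned-up LinkedList/Node API from this file, not anything from the original upstream):

linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert(value)  # appends at the tail
print(linked_list)             # -> "1 2 3"
print(2 in linked_list)        # -> True, via __contains__
linked_list.delete_value(2)
print(list(linked_list))       # -> [1, 3], via LinkedListIterator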
| 376 |
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
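
A minimal sanity check for the checker above (illustrative, not part of the original file):

root = TreeNode(2.0, left=TreeNode(1.0), right=TreeNode(3.0))
assert is_binary_search_tree(root)       # 1 < 2 < 3 is a valid BST
bad = TreeNode(2.0, left=TreeNode(5.0))  # left child larger than its parent
assert not is_binary_search_tree(bad)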
| 376 | 1 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
SCREAMING_SNAKE_CASE : str = 100
SCREAMING_SNAKE_CASE : str = set(range(3, NUM_PRIMES, 2))
primes.add(2)
SCREAMING_SNAKE_CASE : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def lowercase ( _snake_case : int ) ->set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
__snake_case : set[int] = set()
__snake_case : int
__snake_case : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def lowercase ( _snake_case : int = 5_000 ) ->int | None:
"""simple docstring"""
for number_to_partition in range(1 , _snake_case ):
if len(partition(_snake_case ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F'{solution() = }')
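
Why products: each multiset of primes has a unique product (unique factorization), so len(partition(n)) counts the prime partitions of n. A small worked example (illustrative, verified by hand):

# Partitions of 7 into primes: 7, 5+2, 3+2+2 -> products {7, 10, 12}.
assert partition(7) == {7, 10, 12}
assert len(partition(7)) == 3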
| 229 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
SCREAMING_SNAKE_CASE : Any = 3
def lowercase ( _snake_case : int ) ->int:
"""simple docstring"""
print('''Generating primitive root of p''' )
while True:
__snake_case : List[Any] = random.randrange(3 , _snake_case )
if pow(_snake_case , 2 , _snake_case ) == 1:
continue
if pow(_snake_case , _snake_case , _snake_case ) == 1:
continue
return g
def lowercase ( _snake_case : int ) ->tuple[tuple[int, int, int, int], tuple[int, int]]:
"""simple docstring"""
print('''Generating prime p...''' )
__snake_case : List[str] = rabin_miller.generate_large_prime(_snake_case ) # select large prime number.
__snake_case : Optional[int] = primitive_root(_snake_case ) # one primitive root on modulo p.
__snake_case : Dict = random.randrange(3 , _snake_case ) # private_key -> have to be greater than 2 for safety.
__snake_case : Any = cryptomath.find_mod_inverse(pow(_snake_case , _snake_case , _snake_case ) , _snake_case )
__snake_case : Union[str, Any] = (key_size, e_a, e_a, p)
__snake_case : Dict = (key_size, d)
return public_key, private_key
def lowercase ( _snake_case : str , _snake_case : int ) ->None:
"""simple docstring"""
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print('''\nWARNING:''' )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
__snake_case , __snake_case : Optional[int] = generate_key(_snake_case )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , '''w''' ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , '''w''' ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def lowercase ( ) ->None:
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
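
A hedged sketch of reading the keys back, using the comma-separated format that fo.write produces above (the file only exists after running main()):

with open("elgamal_pubkey.txt") as f:
    key_size, e_1, e_2, p = map(int, f.read().split(","))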
| 229 | 1 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput

SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[str] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs: bool = False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
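
A minimal sketch of exercising the cosine-schedule helper above (illustrative; jnp is already imported in this file):

betas = betas_for_alpha_bar(1000)
assert betas.shape == (1000,)
assert bool(jnp.all(betas > 0)) and bool(jnp.all(betas <= 0.999))
alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)  # decreases monotonically toward 0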
| 477 |
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 138 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
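
A hedged usage sketch for the extractor above (assuming it is transformers' SpeechT5FeatureExtractor; everything outside this file is an assumption, and exact output shapes depend on the library version):

import numpy as np

extractor = SpeechT5FeatureExtractor()
waveform = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)   # (1, 16000): batched raw waveform
targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(targets["input_values"].shape)  # (1, num_frames, 80): log-mel spectrogram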
| 715 |
import collections
import inspect
import unittest

from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 604 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=() ,lowerCAmelCase__=None ,lowerCAmelCase__="no" ,lowerCAmelCase__="29500" ):
lowerCamelCase_ = False
lowerCamelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
lowerCamelCase_ = True
elif "IPython" in sys.modules:
lowerCamelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
lowerCamelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' ,lowerCAmelCase__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
lowerCamelCase_ = 8
lowerCamelCase_ = PrepareForLaunch(lowerCAmelCase__ ,distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(lowerCAmelCase__ ,args=lowerCAmelCase__ ,nprocs=lowerCAmelCase__ ,start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*lowerCAmelCase__ )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowerCAmelCase__ ,master_addr='''127.0.01''' ,master_port=lowerCAmelCase__ ,mixed_precision=lowerCAmelCase__ ):
lowerCamelCase_ = PrepareForLaunch(lowerCAmelCase__ ,distributed_type='''MULTI_GPU''' )
print(f"Launching training on {num_processes} GPUs." )
try:
start_processes(lowerCAmelCase__ ,args=lowerCAmelCase__ ,nprocs=lowerCAmelCase__ ,start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCamelCase_ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=() ,lowerCAmelCase__=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowerCAmelCase__ ,master_addr='''127.0.01''' ,master_port='''29500''' ,accelerate_mixed_precision='''no''' ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu='''yes''' ,):
lowerCamelCase_ = PrepareForLaunch(lowerCAmelCase__ ,debug=lowerCAmelCase__ )
start_processes(lowerCAmelCase__ ,args=lowerCAmelCase__ ,nprocs=lowerCAmelCase__ ,start_method='''fork''' )
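
A minimal usage sketch for notebook_launcher (the training function is a hypothetical stand-in; num_processes=2 assumes two GPUs are available):

def training_function():
    # Build the Accelerator, and anything CUDA-related, *inside* this
    # function, as the checks above require.
    from accelerate import Accelerator

    accelerator = Accelerator()
    print(f"Process {accelerator.process_index} of {accelerator.num_processes}")

notebook_launcher(training_function, args=(), num_processes=2)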
| 29 |
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Creates a Linked List from the elements of the given list and returns its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 269 | 0 |
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Hypothesis h(x) = p0 + p1*x1 + p2*x2 + p3*x3 for a given input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output for the example from the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the example from the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of the cost-derivative terms over the training set; index -1 is the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
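
A worked example for intuition (illustrative arithmetic, not part of the original file):

# Hypothesis for the first training input with the initial parameter_vector [2, 4, 1, 5]:
#   h((5, 2, 3)) = 2 + 4*5 + 1*2 + 5*3 = 39, versus the target output 15;
# gradient descent shrinks this gap iteratively.
assert _hypothesis_value((5, 2, 3)) == 39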
| 715 |
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53 | 0 |
import argparse
import copy


def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 485 |
import tensorflow as tf
from ...tf_utils import shape_list
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A , _A=1 , _A=False , **_A) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_A)
_UpperCAmelCase : Dict = vocab_size
_UpperCAmelCase : Any = d_embed
_UpperCAmelCase : List[Any] = d_proj
_UpperCAmelCase : List[Any] = cutoffs + [vocab_size]
_UpperCAmelCase : str = [0] + self.cutoffs
_UpperCAmelCase : Union[str, Any] = div_val
_UpperCAmelCase : Any = self.cutoffs[0]
_UpperCAmelCase : Optional[Any] = len(self.cutoffs) - 1
_UpperCAmelCase : Tuple = self.shortlist_size + self.n_clusters
_UpperCAmelCase : List[Any] = keep_order
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : List[Any] = []
    def build(self, input_shape):
        """simple docstring"""
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
@staticmethod
    def _logit(x, W, b, proj=None):
        """simple docstring"""
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b
@staticmethod
    def _gather_logprob(logprob, target):
        """simple docstring"""
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        """simple docstring"""
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
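
# A minimal usage sketch (illustrative only -- the layer name `A_` comes from this file,
# but the sizes, shapes and call pattern below are assumptions, not part of the original):
#
#   softmax = A_(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
#   hidden = tf.random.normal((8, 4, 32))                    # (seq, batch, d_proj)
#   target = tf.random.uniform((8, 4), maxval=1000, dtype=tf.int64)
#   logprobs = softmax(hidden, target)                       # also registers NLL via add_loss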
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    '''simple docstring'''
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
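
# Worked example: the diagonals of a 5 x 5 number spiral hold 1, 3, 5, 7, 9, 13, 17, 21
# and 25, which sum to 101, and indeed solution(5) == 101.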
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
lowercase__ : Dict = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    '''simple docstring'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    '''simple docstring'''
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
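
# For example, open_knight_tour(1) returns [[1]] immediately (a 1 x 1 board is complete
# after the first placement); larger boards exercise the full backtracking search.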
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )

default_cache_path = os.path.join(torch_cache_home, 'transformers')

CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """simple docstring"""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    """simple docstring"""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    """simple docstring"""

    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f'{file_name}', "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f'{file_name}', "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f'{t * (self._level-1)}{self._name}:\n'
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f'{t * (self._level)}{v}\n'
                self._level += 1
            else:
                r += f'{t * (self._level)}{k}: {v} ({type(v).__name__})\n'
        self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """simple docstring"""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f'{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %'
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    """simple docstring"""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f'{endpoint}/{model_id}-{filename}'
    else:
        return f'{endpoint}/{model_id}/{filename}'
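
# For example, hf_bucket_url("bert-base-uncased", "config.yaml") takes the legacy flat
# layout (no "/" in the model id) and yields
# "https://cdn.huggingface.co/bert-base-uncased-config.yaml".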
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """simple docstring"""
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print("%s not found in cache or force_download set to True, downloading to %s" % (url, temp_file.name))
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    """simple docstring"""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
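
# The cache key is content-addressed: the filename is the sha256 hex digest of the URL,
# optionally suffixed with the digest of the ETag, so a changed remote file gets a fresh
# cache entry while ".h5" files keep their extension.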
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def __snake_case ( _lowercase ,_lowercase="," ):
"""simple docstring"""
assert isinstance(_lowercase ,_lowercase )
if os.path.isfile(_lowercase ):
with open(_lowercase ) as f:
UpperCamelCase = eval(f.read() )
else:
UpperCamelCase = requests.get(_lowercase )
try:
UpperCamelCase = requests.json()
except Exception:
UpperCamelCase = req.content.decode()
assert data is not None, "could not connect"
try:
UpperCamelCase = eval(_lowercase )
except Exception:
UpperCamelCase = data.split('''\n''' )
req.close()
return data
def get_image_from_url(url):
    """simple docstring"""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """simple docstring"""
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    """simple docstring"""
    # NOTE: the argument to os.path.join was lost in this copy; PATH (defined above)
    # is the most plausible candidate.
    print(f'{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb')
def __snake_case ( _lowercase ,_lowercase="RGB" ):
"""simple docstring"""
assert isinstance(_lowercase ,_lowercase )
if os.path.isfile(_lowercase ):
UpperCamelCase = cva.imread(_lowercase )
else:
UpperCamelCase = get_image_from_url(_lowercase )
assert img is not None, f'could not connect to: {im}'
UpperCamelCase = cva.cvtColor(_lowercase ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
UpperCamelCase = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    """simple docstring"""
    return (images[i : i + batch] for i in range(0, len(images), batch))
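
# e.g. list(chunk([1, 2, 3, 4, 5], batch=2)) == [[1, 2], [3, 4], [5]]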
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """simple docstring"""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
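
# For instance, 7**5 = 16807 is a 5-digit fifth power, so it is counted; with the default
# limits the total comes to 49.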
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
'''simple docstring'''
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1
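
    # Bit tricks, for reference: get_next(i) = i | (i + 1) sets the lowest unset bit
    # (get_next(5) == 7), while get_prev(i) = (i & (i + 1)) - 1 strips the trailing run
    # of set bits (get_prev(5) == 3, get_prev(7) == -1).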
    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # NOTE: the original right-hand side of this max() was lost in this copy;
                # a plain max with the stored node value assumes values only ever grow.
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)
    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
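
# A minimal usage sketch (the values are hypothetical, not from the original file):
#
#   tree = MaxFenwickTree(8)
#   tree.update(3, 10)
#   tree.update(5, 7)
#   tree.query(0, 6)  # -> 10, the maximum over the half-open range [0, 6)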
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'cls_token': '<s>'}
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__snake_case = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
__snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__snake_case = {'unk_token': '<unk>'}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__lowerCAmelCase ) )
def lowercase__ ( self : Tuple , **__lowerCAmelCase : List[str] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowercase__ ( self : Dict , **__lowerCAmelCase : Tuple ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : int ):
__snake_case = 'lower newer'
__snake_case = 'lower newer'
return input_text, output_text
def lowercase__ ( self : Union[str, Any] ):
__snake_case = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case = 'lower newer'
__snake_case = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__snake_case = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def lowercase__ ( self : Tuple ):
__snake_case = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def lowercase__ ( self : int ):
__snake_case = self.tokenizer_class.from_pretrained('roberta-base' )
__snake_case = tokenizer.encode('sequence builders' , add_special_tokens=__lowerCAmelCase )
__snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=__lowerCAmelCase )
__snake_case = tokenizer.encode(
'sequence builders' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
__snake_case = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
__snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowercase__ ( self : int ):
__snake_case = self.get_tokenizer()
__snake_case = 'Encode this sequence.'
__snake_case = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
__snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
__snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing spaces after special tokens
__snake_case = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space
__snake_case = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
__snake_case = 'Encode <mask> sequence'
__snake_case = 'Encode <mask>sequence'
__snake_case = tokenizer.encode(__lowerCAmelCase )
__snake_case = encoded.index(__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = tokenizer.encode(__lowerCAmelCase )
__snake_case = encoded.index(__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowercase__ ( self : List[str] ):
pass
def lowercase__ ( self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__snake_case = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__snake_case = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__snake_case = 'A, <mask> AllenNLP sentence.'
__snake_case = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
__snake_case = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
__lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def lowercase__ ( self : Optional[int] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__snake_case = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__snake_case = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , __lowerCAmelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , __lowerCAmelCase )
self.assertEqual(post_processor_state['trim_offsets'] , __lowerCAmelCase )
def lowercase__ ( self : Optional[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__snake_case = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__snake_case = F'{text_of_1_token} {text_of_1_token}'
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
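
# Worked example: solution(30) == 10, counting the semiprimes
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26 below 30.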
if __name__ == "__main__":
print(F'''{solution() = }''')
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        '''simple docstring'''
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        '''simple docstring'''
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self):
        '''simple docstring'''
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
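
# A minimal sketch of how the pieces fit together (the Ray setup and the model name are
# assumptions for illustration, not part of the original file):
#
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()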
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("""/""")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"""Skipping non-model layer {full_name}""")
            continue
        if "optimizer" in full_name:
            logger.info(f"""Skipping optimization layer {full_name}""")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("""layer_with_weights"""):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("""/""".join(name))
        arrays.append(array)
    logger.info(f"""Read a total of {len(arrays):,} layers""")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"""Found layer names with different depths (layer depth {list(set(layer_depth))})""")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            """The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
            """ heads.""")
    # convert layers
    logger.info("""Converting weights...""")
    for full_name, array in zip(names, arrays):
        name = full_name.split("""/""")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("""layer_with_weights"""):
                layer_num = int(m_name.split("""-""")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["""embeddings""", """LayerNorm"""])
                    pointer = getattr(pointer, """embeddings""")
                    pointer = getattr(pointer, """LayerNorm""")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["""encoder""", """layer""", str(layer_num - 4)])
                    pointer = getattr(pointer, """encoder""")
                    pointer = getattr(pointer, """layer""")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["""pooler""", """dense"""])
                    pointer = getattr(pointer, """pooler""")
                    pointer = getattr(pointer, """dense""")
            elif m_name == "embeddings":
                trace.append("""embeddings""")
                pointer = getattr(pointer, """embeddings""")
                if layer_num == 0:
                    trace.append("""word_embeddings""")
                    pointer = getattr(pointer, """word_embeddings""")
                elif layer_num == 1:
                    trace.append("""position_embeddings""")
                    pointer = getattr(pointer, """position_embeddings""")
                elif layer_num == 2:
                    trace.append("""token_type_embeddings""")
                    pointer = getattr(pointer, """token_type_embeddings""")
                else:
                    raise ValueError(f"""Unknown embedding layer with name {full_name}""")
                trace.append("""weight""")
                pointer = getattr(pointer, """weight""")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["""attention""", """self"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """self""")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["""attention""", """output""", """LayerNorm"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """LayerNorm""")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["""attention""", """output""", """dense"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["""output""", """dense"""])
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["""output""", """LayerNorm"""])
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """LayerNorm""")
            elif m_name == "_key_dense":
                # attention key
                trace.append("""key""")
                pointer = getattr(pointer, """key""")
            elif m_name == "_query_dense":
                # attention query
                trace.append("""query""")
                pointer = getattr(pointer, """query""")
            elif m_name == "_value_dense":
                # attention value
                trace.append("""value""")
                pointer = getattr(pointer, """value""")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["""intermediate""", """dense"""])
                pointer = getattr(pointer, """intermediate""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("""output""")
                pointer = getattr(pointer, """output""")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("""bias""")
                pointer = getattr(pointer, """bias""")
            elif m_name in ["kernel", "gamma"]:
                trace.append("""weight""")
                pointer = getattr(pointer, """weight""")
            else:
                logger.warning(f"""Ignored {m_name}""")
        # for certain layers reshape is necessary
        trace = """.""".join(trace)
        if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""", trace) or re.match(
            r"""(\S+)\.attention\.output\.dense\.weight""", trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
                f""" {array.shape}""")
        logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"""Loading model based on config from {config_path}...""")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""")
    torch.save(model.state_dict(), pytorch_dump_path)
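
# Example invocation (the script name and paths are placeholders, not from the original):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_checkpoint \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin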
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
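    # A minimal sketch of what the "linear" strategy above does (illustrative,
    # not part of the test suite): positions are divided by the scaling factor
    # before the rotary angles are computed, e.g.
    #
    #   positions = torch.arange(seq_len, dtype=torch.float32)
    #   scaled_positions = positions / 10.0  # rope_scaling = {"type": "linear", "factor": 10.0}
    #
    # "dynamic" scaling instead leaves short sequences untouched and only
    # rescales once the input grows past the original max_position_embeddings,
    # which is exactly the behaviour the assertions above distinguish.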
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO NOT CHANGE. This function normalizes the target transcriptions."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
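    # Example invocation (illustrative values; any compatible model/dataset IDs work):
    #   python eval.py --model_id facebook/wav2vec2-base-960h \
    #       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs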
"""Tokenization classes for Jukebox."""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    """
    Constructs a Jukebox tokenizer. Jukebox conditions on three inputs -- artists,
    genres and lyrics -- each of which has its own vocabulary file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Converts the artist, genre and lyrics tokens to their indices using the vocabulary."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Converts a string into a sequence of character tokens."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Converts three strings into three sequences of tokens."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input text: lowercase, keep [a-z0-9.], collapse everything else to '_'."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)

    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Converts the raw strings to lists of token ids."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Saves the tokenizer's vocabulary dictionaries to `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Converts indices back into artist, genre and lyric tokens using the decoders."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
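
# Minimal usage sketch (illustrative; assumes the three vocabulary files from
# the "ArthurZ/jukebox" repository referenced in PRETRAINED_VOCAB_FILES_MAP):
#
#   tokenizer = JukeboxTokenizer(artists_file, genres_file, lyrics_file)
#   encoding = tokenizer("Alan Jackson", "Country Rock", lyrics="old town road")
#   encoding["input_ids"]  # one tensor per model version: (v3, v2, v2)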
"""Approximate the square root of a number with Newton's method."""
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """
    Square root approximated using Newton's method.
    https://en.wikipedia.org/wiki/Newton%27s_method

    >>> all(abs(square_root_iterative(i) - math.sqrt(i)) <= 1e-10 for i in range(500))
    True
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
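
    # Equivalence note: for f(x) = x**2 - a, the Newton update x - f(x)/f'(x)
    # simplifies to (x + a / x) / 2, the classic Babylonian method.
    # A quick unrolled check for a = 2:
    x = 3.0
    for _ in range(5):
        x = (x + 2.0 / x) / 2  # converges rapidly toward sqrt(2)
    print(x)  # ~1.4142135623730951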
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
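
# Background for check_circuit_or_path(): by Euler's theorem, a connected graph
# has an Euler cycle iff no vertex has odd degree, and an Euler path iff
# exactly two vertices do. For g1 above the degrees are
# {1: 3, 2: 2, 3: 2, 4: 2, 5: 1}, so vertices 1 and 5 are the two odd-degree
# vertices and g1 has an Euler path (starting at one of them).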
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
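
# How the lazy pattern above behaves (a simplified sketch, not the actual
# _LazyModule implementation): the module object in sys.modules is swapped for
# a proxy whose attribute lookup imports the heavy submodule on first access,
# so e.g. the TensorFlow classes only cost an import when actually touched.
#
#   import importlib
#   class _LazyProxy:
#       def __getattr__(self, name):
#           module = importlib.import_module(".modeling_lxmert", __package__)
#           return getattr(module, name)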
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5

        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
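
    # Worked check for the top-k assertions above: FlaxTopKLogitsWarper(3)
    # keeps the 3 largest logits and sets the rest to -inf. For the ramp
    # [0, 1, ..., 9] that leaves indices 7, 8 and 9 finite -- exactly the
    # "7 * [True] + 3 * [False]" isinf pattern the first assertion expects.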
"""Image processor class for LayoutLMv2."""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__snake_case = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """Constructs a LayoutLMv2 image processor: optional Tesseract OCR, resizing, and RGB -> BGR conversion."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or batch of images: optional OCR, resizing, and channel-order conversion."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
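
# Minimal usage sketch (illustrative; requires Pillow plus a Tesseract binary
# on PATH, and "document.png" is a placeholder path):
#
#   from PIL import Image
#   processor = LayoutLMv2ImageProcessor()            # apply_ocr=True by default
#   encoding = processor(Image.open("document.png"))  # pixel_values, words, boxes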
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))

        step_2 = MarkupText("Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(step_2, checkpoint, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* a v) / (v* v) of a Hermitian matrix a."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
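
    # Worked check for the final assertion in tests(): with
    # a = [[1, 2, 4], [2, 3, -1], [4, -1, 1]] and v = [1, 2, 3]^T we get
    # a @ v = [17, 5, 5]^T, hence v* a v = 42 and v* v = 14, so the
    # Rayleigh quotient is 42 / 14 = 3.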
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # the original checkpoint stores query/key/value fused in one tensor;
            # split it into the separate projections the HF model expects
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
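

def _example_fused_qkv_split():
    # Hedged sketch (not part of the original script): a fused qkv projection of
    # shape (3 * dim, dim) splits into equal query/key/value blocks along dim 0,
    # exactly the slicing used in `convert_state_dict` above.
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = fused[:dim, :]
    key = fused[dim : dim * 2, :]
    value = fused[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)
    return query, key, value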
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
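

def _example_super_resolution(model_dir, image_url):
    # Hedged usage sketch (not part of the original script): load a converted
    # checkpoint from `model_dir` and upscale one image via the transformers API.
    # `model_dir` and `image_url` are placeholders supplied by the caller.
    from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

    model = Swin2SRForImageSuperResolution.from_pretrained(model_dir)
    processor = Swin2SRImageProcessor()
    image = Image.open(requests.get(image_url, stream=True).raw).convert("RGB")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.reconstruction  # shape (1, 3, H * upscale, W * upscale)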
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
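

def _example_jit_shape_equivalence():
    # Hedged sketch (not part of the original tests): jitted and eager calls of
    # the same function should agree in output shape, which is the property the
    # JIT compilation test above asserts for every BigBird head.
    import jax
    import jax.numpy as jnp

    @jax.jit
    def double(x):
        return x * 2

    x = jnp.ones((2, 3))
    jitted = double(x)
    with jax.disable_jit():
        eager = double(x)
    assert jitted.shape == eager.shape
    return jitted, eager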
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
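

def _example_migration(model, training_args):
    # Hedged sketch (not part of the original shim): since `SageMakerTrainer`
    # only forwards to `Trainer`, call sites can construct `Trainer` directly.
    return Trainer(model=model, args=training_args)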
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI works with no configuration,
    a saved configuration, and specific config files.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate tpu-config` CLI passes the right command to `gcloud`.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
class TrieNode:
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
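

def _example_autocomplete(root: TrieNode, prefix: str) -> list:
    # Hedged sketch (not part of the original module): collect every stored word
    # beginning with `prefix`, the classic autocomplete use of a trie.
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]

    matches = []

    def _collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            matches.append(word)
        for char, child in node.nodes.items():
            _collect(child, word + char)

    _collect(curr, prefix)
    return matches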
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
from ..utils import DummyObject, requires_backends


# Note: the concrete class names in this auto-generated dummy-objects module were
# lost during extraction; the placeholder names below are hypothetical stand-ins,
# one per pipeline class that requires the `flax` and `transformers` backends.
class FlaxTransformersDummyPipeline0(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxTransformersDummyPipeline1(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxTransformersDummyPipeline2(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxTransformersDummyPipeline3(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__A : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from math import pow
def lowercase ( __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , ):
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
lowercase_ : Dict = int(pow(__snake_case , __snake_case ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
lowercase_ , lowercase_ : Optional[int] = backtrack(
__snake_case , __snake_case , current_number + 1 , __snake_case , __snake_case )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
lowercase_ , lowercase_ : int = backtrack(
__snake_case , __snake_case , current_number + 1 , __snake_case , __snake_case )
return current_sum, solutions_count
def lowercase ( __snake_case : int , __snake_case : int ):
if not (1 <= needed_sum <= 1_0_0_0 and 2 <= power <= 1_0):
raise ValueError(
'''Invalid input\n'''
'''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
return backtrack(__snake_case , __snake_case , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
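

def _example_distinct_square_sums():
    # Hedged sketch (not part of the original module): 13 has exactly one
    # representation as a sum of distinct natural-number squares (13 = 2**2 + 3**2),
    # so solve(13, 2) returns 1.
    assert solve(13, 2) == 1
    return solve(13, 2)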
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
_a : Union[str, Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
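

def _example_query_index(passages_path: str, question_embedding):
    # Hedged sketch (not part of the original script): reload the passages and the
    # HNSW index written above, then fetch the passages nearest to a query embedding
    # (a float32 numpy array with the same dimension `d` used to build the index).
    from datasets import load_from_disk

    dataset = load_from_disk(passages_path)
    dataset.load_faiss_index("embeddings", passages_path + "_hnsw_index.faiss")
    scores, examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)
    return scores, examples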
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray):
    """Doolittle's method of LU decomposition: factor a square matrix into a unit
    lower triangular matrix and an upper triangular matrix."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
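

def _example_lu_roundtrip():
    # Hedged sketch (not part of the original module): the Doolittle factors
    # returned above should multiply back to the input matrix.
    table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(table)
    assert np.allclose(lower @ upper, table)
    return lower, upper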
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
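

def _example_max_flow():
    # Hedged sketch (not part of the original module): the classic CLRS example
    # network below has a maximum flow of 23. Pass a copy, since ford_fulkerson
    # mutates the capacity matrix in place to maintain residual capacities.
    network = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    assert ford_fulkerson([row[:] for row in network], 0, 5) == 23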
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
lowerCamelCase = """bart"""
lowerCamelCase = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
lowerCamelCase = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
lowerCamelCase = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowerCamelCase = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
lowerCamelCase = st.sidebar.checkbox("""Demo options""")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
lowerCamelCase = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
lowerCamelCase = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
lowerCamelCase = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
lowerCamelCase = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
lowerCamelCase = """wiki40b"""
lowerCamelCase = """dense"""
lowerCamelCase = """beam"""
lowerCamelCase = 2
lowerCamelCase = 64
lowerCamelCase = 2_56
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = st.sidebar.checkbox("""Generation options""")
if generate_options:
lowerCamelCase = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
lowerCamelCase = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
lowerCamelCase = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
lowerCamelCase = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
lowerCamelCase = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowerCamelCase = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
lowerCamelCase = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
lowerCamelCase = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
lowerCamelCase , lowerCamelCase = make_support(question, source=wiki_source, method="""dense""", n_results=10)
lowerCamelCase , lowerCamelCase = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
lowerCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
lowerCamelCase = support_list[:10]
lowerCamelCase = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
lowerCamelCase , lowerCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
lowerCamelCase = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


# Print the parenthesization, with Ai denoting matrix i
def print_optiomal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into rectangular components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check whether a system of forces is in static equilibrium (net moment ~ 0)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])

    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of an EfficientFormer model."""

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
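# Minimal usage sketch (assumes the `transformers` package layout above; the
# printed values are just the defaults):
#   config = EfficientFormerConfig(num_meta3d_blocks=2)
#   print(config.hidden_sizes)   # [48, 96, 224, 448]
#   print(config.model_type)     # "efficientformer"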
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    """Factory used by argparse to build the command."""
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
|
"""simple docstring"""
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Sort a list of integers in place with pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    """
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
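# Why "pigeonhole": each value gets a hole at offset (value - min). For
# [8, 3, 2, 7, 4] the holes span 2..8 and the expected result is
# [2, 3, 4, 7, 8]. Runtime is O(n + range), so the algorithm only pays off
# when the value range is close to the input size.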
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
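# Typical invocation (the path is an assumption based on the usual
# transformers repo layout; RUN_SLOW=1 enables the @slow integration tests):
#   RUN_SLOW=1 pytest tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py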
|
'''simple docstring'''
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list of the first n Hamming numbers is: {hamming_numbers}")
print("-----------------------------------------------------")
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
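# With the lazy structure above, importing the package stays cheap: heavy
# submodules such as modeling_electra are only loaded on first attribute
# access, e.g. (a sketch, assuming the usual transformers layout):
#   from transformers.models.electra import ElectraModel  # triggers the lazy load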
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
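# Usage sketch (hypothetical array shapes; any HxWxC uint8 image works):
#   import numpy as np
#   processor = GLPNImageProcessor(size_divisor=32)
#   image = np.zeros((70, 100, 3), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"][0].shape   # (3, 64, 96): rounded down to multiples of 32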
|
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR: returns 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the two-input truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
|
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # ds_b -> digitsum(b), c -> the low-order part of a_i = b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
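# Small-scale check: the sequence is a(1)=1, a(n)=a(n-1)+digitsum(a(n-1)),
# i.e. 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ..., so solution(10) is expected
# to return 62 (1-indexed as in Project Euler); the default n=10**15
# reproduces the full problem.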
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
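# Usage sketch (downloads the hosted files above on first call):
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tok("hello world")
#   enc["input_ids"]   # [CLS] ... [SEP] ids, lower-cased per the init config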
|
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
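# Typical downstream import, assuming this file lives at diffusers/models/__init__.py:
#   from diffusers.models import UNet2DConditionModel, AutoencoderKL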
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base class for speech feature extractors that pad/truncate sequences."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
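# Collate-style usage sketch (assumes a concrete subclass instance `extractor`
# with feature_size=1 and padding_value=0.0; `input_values` is the conventional
# main input name for audio feature extractors):
#   feats = [{"input_values": [0.1, 0.2]}, {"input_values": [0.3]}]
#   batch = extractor.pad(feats, padding=True, return_tensors="np")
#   batch["input_values"].shape   # (2, 2); the short example is padded with 0.0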
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
|
|
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
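# Launch examples (the script file name is an assumption):
#   python nlp_example.py                              # single process
#   accelerate launch nlp_example.py                   # uses `accelerate config` settings
#   accelerate launch --mixed_precision fp16 nlp_example.py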
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
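# Usage sketch (this builder backs the packaged "csv" loader; the file path is
# hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
#   ds["train"][0]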
|
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
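# Expected classification of the sample graphs above (start nodes and exact
# paths depend on the dfs visiting order):
#   g1 -> Euler path (two odd-degree vertices: 1 and 5)
#   g2 -> Euler cycle (all degrees even)
#   g3 -> not Eulerian (four odd-degree vertices)
#   g4 -> Euler cycle (triangle)
#   g5 -> Euler cycle (edgeless graph; the trivial path [1])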
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
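
# Illustrative usage sketch (mirrors the docstring examples above; the
# two-token sequences here are made up): an identical prediction and
# reference give a perfect score.
# >>> metric = datasets.load_metric("google_bleu")
# >>> metric.compute(predictions=[["the", "cat"]], references=[[["the", "cat"]]])
# {'google_bleu': 1.0}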
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
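
# Small usage sketch (illustrative values, not from the original file):
if __name__ == "__main__":
    cfg = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
    print(cfg.model_type, cfg.hidden_size, cfg.pooler_hidden_size)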
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
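
# Usage sketch (illustrative): the constructor above enforces that two-stage
# decoding is only enabled together with box refinement.
if __name__ == "__main__":
    cfg = DeformableDetrConfig(two_stage=True, with_box_refine=True)
    print(cfg.hidden_size, cfg.num_queries)  # 256 300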
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
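# Note: _LazyModule defers the heavy torch/vision imports declared above;
# attribute access such as `transformers.models.bridgetower.BridgeTowerModel`
# triggers the real submodule import only on first use.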
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
    'Salesforce/blip-vqa-capfilt-large': (
        'https://huggingface.co/Salesforce/blip-vqa-base-capfilt/resolve/main/config.json'
    ),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
    'Salesforce/blip-itm-base-flickr': 'https://huggingface.co/Salesforce/blip-itm-base-flickr/resolve/main/config.json',
    'Salesforce/blip-itm-large-flickr': (
        'https://huggingface.co/Salesforce/blip-itm-large-flickr/resolve/main/config.json'
    ),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
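
# Usage sketch (illustrative): compose a full BlipConfig from fresh sub-configs.
if __name__ == "__main__":
    blip_cfg = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())
    print(blip_cfg.projection_dim, blip_cfg.vision_config.image_size)  # 512 384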
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho) for bulk modulus K and density rho."""
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
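
# Worked example (approximate, assuming water near 20 degrees C with density
# ~998 kg/m^3 and bulk modulus ~2.15e9 Pa; both values are illustrative):
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ~= 1467.8 m/s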
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
from random import random
class Node:
    """Treap node: stores a value plus a random heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (keys <= value, keys > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps whose keys are already ordered (all left <= all right)."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert ``value`` by splitting around it and merging a fresh node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove all nodes equal to ``value`` by splitting them out and re-merging."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print treap values in sorted order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply commands like '+5' (insert 5) or '-5' (erase 5) to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Read commands from stdin until 'q' and maintain the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
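
# Illustrative session (hand-checked, not part of the original file):
# inserting 5, 3, 8 and printing inorder yields the sorted values
# regardless of the random priorities.
#   root = None
#   for x in (5, 3, 8):
#       root = insert(root, x)
#   inorder(root)  # prints: 3,5,8,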
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Return a random password drawn from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Build a password around required characters, padding with random picks."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check length plus presence of upper, lower, digit and special characters."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
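
# Illustrative checks (hand-verified against the functions above):
#   len(password_generator(12)) == 12          # length is respected
#   is_strong_password("Abc123!?xyz") is True  # mixed case, digit, punctuation
#   is_strong_password("abc") is False         # too short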
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
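
# Usage sketch (illustrative): the property above gives the overall stride of
# the convolutional feature extractor, i.e. 5*2*2*2*2*2*2 = 320 by default.
if __name__ == "__main__":
    print(UniSpeechConfig().inputs_to_logits_ratio)  # 320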
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels)

        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Use Pollard's rho algorithm to return a nontrivial factor of ``num``, or None if none is found."""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
args = parser.parse_args()
divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
    quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
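
# Example CLI run (hand-traced with the default seed=2, step=1; the gcd of the
# tortoise/hare gap finds the factor 11 on the second cycle-detection step):
#   $ python pollard_rho.py 187
#   187 = 11 * 17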
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
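
# Note on the cache above: hash_funcs maps tensors and the BART tokenizer to
# None so Streamlit does not try to hash heavyweight model state between reruns.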
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
snake_case_ : Tuple = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
snake_case_ , snake_case_ : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
snake_case_ , snake_case_ : Tuple = make_support(question, source=wiki_source, method='sparse', n_results=10)
snake_case_ : Tuple = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
snake_case_ : Optional[Any] = support_list[:10]
snake_case_ : int = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
snake_case_ , snake_case_ : Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
snake_case_ , snake_case_ : str = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
snake_case_ : Dict = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
snake_case_ : int = res[1].strip()
if sec_titles == "":
snake_case_ : Tuple = '[{}]({})'.format(res[0], wiki_url)
else:
snake_case_ : List[Any] = sec_titles.split(' & ')
snake_case_ : List[Any] = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
snake_case_ : Dict = find_nearest_training(question)
snake_case_ : Union[str, Any] = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
snake_case_ : Tuple = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
snake_case_ : Any = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
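
# Illustration (not part of the original demo): the retriever description above
# mentions max-inner-product search between a question embedding and passage
# embeddings. Below is a minimal sketch of that scoring step; the function name
# and array shapes are hypothetical, not the demo's actual index code.
import numpy as np


def mips_top_k(question_emb: np.ndarray, passage_embs: np.ndarray, k: int = 10):
    # passage_embs: (num_passages, dim); question_emb: (dim,)
    scores = passage_embs @ question_emb  # one inner product per passage
    top = np.argsort(-scores)[:k]  # indices of the k largest scores
    return top, scores[top]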
| 212
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
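
# Illustration (not part of the original test module): the YAML round trip the
# tests above exercise, written out as a plain helper. Assumes the private
# `_to_yaml_list`/`_from_yaml_list` helpers behave as the tests assert.
def _demo_split_dict_round_trip():
    split_dict = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
    yaml_list = split_dict._to_yaml_list()  # list of plain dicts, safe to dump as YAML
    reloaded = SplitDict._from_yaml_list(yaml_list)
    assert reloaded["train"].num_examples == 42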
| 212
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
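
# Note (illustration, not part of the original file): because `_LazyModule` is
# installed in `sys.modules` above, the torch-dependent submodule is only
# imported on first attribute access, e.g.:
#
#     from transformers.models.clipseg import CLIPSegModel  # triggers the lazy import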
| 713
|
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_importance = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
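
# Usage sketch (illustration only; the script file name and all paths below are
# assumptions, not part of the original file):
#
#     python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --tapas_config_file /path/to/tapas_config.json \
#         --pytorch_dump_path /path/to/output_dir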
| 540
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """This is a general feature extraction class for speech recognition."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad input values (or a batch of them) up to a predefined length or the batch's max length."""
        # If we have a list of dicts, let's convert it to a dict of lists
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
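
# Usage sketch (illustration only; `DummyFeatureExtractor` is a hypothetical
# subclass, not part of this module). `pad` right-pads ragged inputs with
# `padding_value` and builds the matching attention mask:
#
#     class DummyFeatureExtractor(SequenceFeatureExtractor):
#         model_input_names = ["input_values"]
#
#     fe = DummyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#     batch = fe.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding=True)
#     # batch["input_values"] has shape (2, 3); batch["attention_mask"] is [[1, 1, 1], [1, 0, 0]]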
| 181
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """Constructs a BLIP processor which wraps a BLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
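
# Usage sketch (illustration only; the checkpoint name is an assumption and the
# call downloads weights, so it is left commented out):
#
#     from PIL import Image
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=Image.new("RGB", (224, 224)), text="a photo of", return_tensors="pt")
#     # `inputs` combines `pixel_values` from the image processor with the tokenized text fields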
| 181
| 1
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)"""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string"""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
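
# Usage sketch (illustration only, run ad hoc): metadata can also be built
# straight from a YAML string and re-serialized, which is what the README
# round trip above relies on.
#
#     meta = DatasetMetadata.from_yaml_string("language:\n- en\n")
#     assert meta["language"] == ["en"]
#     print(meta.to_yaml_string())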
| 47
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""

    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""

    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""

    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)

        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)

        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)

        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)

        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)

        if args.recalibrate_weights:
            recalibrate_weights(model)

        if args.fuse_qkv:
            fuse_qkv(model, args)

        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""

    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""

    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Use the same scale factor for the Q, K and V projections by taking the max of their amaxes."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval by adjusting the amax of the following input quantizer."""

    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""

    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""

    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""

    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""

    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers where the module name contains a substring in names."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
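
# Usage sketch (illustration only; `model` is assumed to be a model built with
# pytorch_quantization's QuantLinear layers, which is not shown here):
#
#     import argparse
#
#     parser = argparse.ArgumentParser()
#     add_arguments(parser)
#     args = parser.parse_args(["--wprec", "8", "--aprec", "8"])
#     set_default_quantizers(args)  # must run before the model is created
#     configure_model(model, args)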
| 47
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 329
|
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit of `number` (0 for 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
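
# Note (illustration): for a non-negative int this matches Python's built-in
# int.bit_length, e.g. get_highest_set_bit_position(19) == (19).bit_length() == 5.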
| 71
| 0
|
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a p-n junction diode."""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
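
# Example call (illustration only; concentrations in cm^-3 are typical silicon
# values, and the result follows from V = (kT/q) * ln(Nd * Na / ni^2)):
#
#     builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
#     # ~0.81 V at T = 300 K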
| 192
|
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
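
# Round-trip sketch (illustration; det([[2, 5], [1, 6]]) = 7 is coprime with 36,
# and process_text pads the plaintext with its last character to a multiple of
# the key order, hence the trailing 'R'):
#
#     >>> cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     >>> cipher.decrypt(cipher.encrypt("testing hill cipher"))
#     'TESTINGHILLCIPHERR'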
| 192
| 1
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 538
|
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by inspecting the lowest bit with `% 2` and shifting."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
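
# Note (illustration): both functions compute the population count, so a quick
# cross-check is bin(n).count("1"); e.g. bin(25) == "0b11001" has three set bits,
# matching both implementations.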
| 538
| 1
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case: Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__snake_case , )
_snake_case: str = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
_snake_case: Optional[int] = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_zero=__snake_case , )
torch.manual_seed(0 )
_snake_case: Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_snake_case: Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
_snake_case: Optional[int] = CLIPTextModel(__snake_case )
_snake_case: Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case: int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a dog and a newt',
            'mask_image': mask,
            'image_latents': latents,
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB')
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': image,
            'source_prompt': 'a cat and a frog',
            'target_prompt': 'a dog and a newt',
            'generator': generator,
            'num_inference_steps': 2,
            'num_maps_per_mask': 2,
            'mask_encode_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB')
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': image,
            'prompt': 'a cat and a frog',
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'decode_latents': True,
            'output_type': 'numpy',
        }
        return inputs
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
if not hasattr(self.pipeline_class , '_optional_components' ):
return
_snake_case: List[str] = self.get_dummy_components()
_snake_case: str = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__snake_case , __snake_case , __snake_case )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_snake_case: List[Any] = self.get_dummy_inputs(__snake_case )
_snake_case: Optional[Any] = pipe(**__snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__snake_case )
_snake_case: Any = self.pipeline_class.from_pretrained(__snake_case )
pipe_loaded.to(__snake_case )
pipe_loaded.set_progress_bar_config(disable=__snake_case )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__snake_case , __snake_case ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
_snake_case: Any = self.get_dummy_inputs(__snake_case )
_snake_case: List[str] = pipe_loaded(**__snake_case )[0]
_snake_case: List[str] = np.abs(output - output_loaded ).max()
self.assertLess(__snake_case , 1e-4 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case: Dict = 'cpu'
_snake_case: int = self.get_dummy_components()
_snake_case: Union[str, Any] = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_snake_case: Dict = self.get_dummy_mask_inputs(__snake_case )
_snake_case: Dict = pipe.generate_mask(**__snake_case )
_snake_case: Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
_snake_case: Tuple = np.array([0] * 9 )
_snake_case: Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
_snake_case: Optional[int] = 'cpu'
_snake_case: Tuple = self.get_dummy_components()
_snake_case: Optional[int] = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_snake_case: int = self.get_dummy_inversion_inputs(__snake_case )
_snake_case: Optional[Any] = pipe.invert(**__snake_case ).images
_snake_case: Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_snake_case: str = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
_snake_case: Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: Optional[int] = 'cpu'
_snake_case: Optional[Any] = self.get_dummy_components()
_snake_case: int = {'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
_snake_case: str = DPMSolverMultistepScheduler(**__snake_case )
_snake_case: Any = DPMSolverMultistepInverseScheduler(**__snake_case )
_snake_case: Union[str, Any] = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_snake_case: Dict = self.get_dummy_inversion_inputs(__snake_case )
_snake_case: Optional[Any] = pipe.invert(**__snake_case ).images
_snake_case: Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_snake_case: Any = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
_snake_case: str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case , 1e-3 )
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int ):
'''simple docstring'''
_snake_case: Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
_snake_case: str = raw_image.convert('RGB' ).resize((7_68, 7_68) )
_snake_case: List[Any] = raw_image
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: List[str] = torch.manual_seed(0 )
_snake_case: str = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__snake_case , torch_dtype=torch.floataa )
_snake_case: Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config )
_snake_case: Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__snake_case )
_snake_case: Optional[int] = 'a bowl of fruit'
_snake_case: Tuple = 'a bowl of pears'
_snake_case: Dict = pipe.generate_mask(
image=self.raw_image , source_prompt=__snake_case , target_prompt=__snake_case , generator=__snake_case , )
_snake_case: Tuple = pipe.invert(
prompt=__snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=__snake_case ).latents
_snake_case: List[str] = pipe(
prompt=__snake_case , mask_image=__snake_case , image_latents=__snake_case , generator=__snake_case , negative_prompt=__snake_case , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
_snake_case: Optional[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
_snake_case: List[Any] = torch.manual_seed(0 )
_snake_case: Any = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__snake_case , torch_dtype=torch.floataa )
_snake_case: Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_snake_case: Dict = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__snake_case )
_snake_case: int = 'a bowl of fruit'
_snake_case: str = 'a bowl of pears'
_snake_case: Union[str, Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=__snake_case , target_prompt=__snake_case , generator=__snake_case , )
_snake_case: int = pipe.invert(
prompt=__snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=__snake_case , num_inference_steps=25 , ).latents
_snake_case: Optional[int] = pipe(
prompt=__snake_case , mask_image=__snake_case , image_latents=__snake_case , generator=__snake_case , negative_prompt=__snake_case , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
_snake_case: Optional[int] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 273
|
'''simple docstring'''
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces')
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
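    # Editor's note (added): quick round-trip demo relying on the encode()/decode()
    # names restored above; each letter maps to a 5-character A/B group.
    assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
    assert decode("AABBBAABAAABABAABABAABBAB") == "hello"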
| 273
| 1
|
__all__ = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 9
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet(self) -> UNetaDModel:
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model
    def test_inference(self) -> None:
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_cifar10(self) -> None:
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 585
| 0
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
def __call__( self : Dict , lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , **lowerCAmelCase : Optional[int] , )-> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCAmelCase = isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
UpperCAmelCase = is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
UpperCAmelCase = np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
UpperCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
UpperCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
UpperCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
UpperCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
UpperCAmelCase = np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
UpperCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
UpperCAmelCase = np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
UpperCAmelCase = padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
UpperCAmelCase = audio_features[i]
UpperCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
UpperCAmelCase = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
UpperCAmelCase = {'''audio_values''': padded_audio_features}
UpperCAmelCase = BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 713
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Any = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Discount a series of cash flows back to time zero at the given rate."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
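    # Editor's note (added): worked example using the restored `present_value`
    # name: -100 + 60 / 1.1 + 60 / 1.1**2 = -100 + 54.55 + 49.59 ≈ 4.13
    print(present_value(0.1, [-100, 60, 60]))  # 4.13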
| 111
|
'''simple docstring'''
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization: if
    n = p1**a1 * p2**a2 * ..., the divisor count is (a1 + 1) * (a2 + 1) * ..."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
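    # Editor's note (added): sanity check -- 28 = 2**2 * 7 has
    # (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.
    assert count_divisors(28) == 6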
| 111
| 1
|
'''simple docstring'''
def simplify(current_set):
    """Normalize each row by its leading term, subtract to cancel the leading
    column, then recurse on the remaining columns."""
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicated_set = current_set.copy()
    for row_index, row in enumerate(duplicated_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations):
    """Solve n simultaneous linear equations given as n lists of n + 1 numbers
    (the coefficients followed by the constant term)."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
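    # Editor's note (added): a hand-checkable case -- x + y = 5 and 2x - y = 1
    # give x = 2, y = 3.
    print(solve_simultaneous([[1, 1, 5], [2, -1, 1]]))  # [2.0, 3.0]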
| 715
|
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ) -> None:
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )
    def read(self):
        # streaming mode never materializes the dataset on disk
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
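# Editor's note: hypothetical usage sketch (the Spark session and column names
# are illustrative, not from this file):
#
#   spark_df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
#   ds = SparkDatasetReader(spark_df, cache_dir="/tmp/ds_cache").read()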
| 265
| 0
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = 'sample'
@property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
@property
    def input_shape(self):
'''simple docstring'''
return (3, 32, 32)
@property
    def output_shape(self):
'''simple docstring'''
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
'''simple docstring'''
        init_dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_forward_signature(self):
        pass
    def test_training(self):
        pass
    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1E-3))
| 21
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
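    # Editor's note: worked example for get_expected_values (added): with
    # size {"shortest_edge": 18}, an image of width 50 and height 100 (w < h)
    # maps to expected_width = 18 and expected_height = int(18 * 100 / 50) = 36.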
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def A_ ( self ):
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def A_ ( self ):
pass
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A_ ( self ):
# prepare image and target
snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {"image_id": 3_97_69, "annotations": target}
# encode them
snake_case__ = DetaImageProcessor()
snake_case__ = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def A_ ( self ):
# prepare image, target and masks_path
snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
snake_case__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ = DetaImageProcessor(format="coco_panoptic" )
snake_case__ = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
snake_case__ = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 276
| 0
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 447
|
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
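    # Editor's note (added): quick checks against the first few primes 2, 3, 5, 7, 11, 13.
    assert solution(1) == 2
    assert solution(6) == 13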
| 447
| 1
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
'''simple docstring'''
@register_to_config
def __init__( self : Tuple , snake_case : int = 128 , snake_case : int = 256 , snake_case : float = 2_000.0 , snake_case : int = 768 , snake_case : int = 12 , snake_case : int = 12 , snake_case : int = 64 , snake_case : int = 2_048 , snake_case : float = 0.1 , ):
super().__init__()
UpperCAmelCase_ :Optional[Any] = nn.Sequential(
nn.Linear(snake_case , d_model * 4 , bias=snake_case ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=snake_case ) , nn.SiLU() , )
UpperCAmelCase_ :Optional[int] = nn.Embedding(snake_case , snake_case )
UpperCAmelCase_ :Any = False
UpperCAmelCase_ :Union[str, Any] = nn.Linear(snake_case , snake_case , bias=snake_case )
UpperCAmelCase_ :Any = nn.Dropout(p=snake_case )
UpperCAmelCase_ :Any = nn.ModuleList()
for lyr_num in range(snake_case ):
# FiLM conditional T5 decoder
UpperCAmelCase_ :List[str] = DecoderLayer(d_model=snake_case , d_kv=snake_case , num_heads=snake_case , d_ff=snake_case , dropout_rate=snake_case )
self.decoders.append(snake_case )
UpperCAmelCase_ :List[Any] = TaLayerNorm(snake_case )
UpperCAmelCase_ :str = nn.Dropout(p=snake_case )
UpperCAmelCase_ :str = nn.Linear(snake_case , snake_case , bias=snake_case )
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
def snake_case_ ( self : Optional[Any] , snake_case : str , snake_case : str , snake_case : Optional[Any] ):
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ :int = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
UpperCAmelCase_ :Union[str, Any] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
UpperCAmelCase_ :Tuple = self.conditioning_emb(snake_case ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
UpperCAmelCase_ :Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
UpperCAmelCase_ :List[str] = torch.broadcast_to(
torch.arange(snake_case , device=decoder_input_tokens.device ) , (batch, seq_length) , )
UpperCAmelCase_ :int = self.position_encoding(snake_case )
UpperCAmelCase_ :int = self.continuous_inputs_projection(snake_case )
inputs += position_encodings
UpperCAmelCase_ :Dict = self.dropout(snake_case )
# decoder: No padding present.
UpperCAmelCase_ :int = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
UpperCAmelCase_ :int = [(x, self.encoder_decoder_mask(snake_case , snake_case )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
UpperCAmelCase_ :Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
UpperCAmelCase_ :List[Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
UpperCAmelCase_ :Optional[Any] = lyr(
snake_case , conditioning_emb=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )[0]
UpperCAmelCase_ :List[str] = self.decoder_norm(snake_case )
UpperCAmelCase_ :Dict = self.post_dropout(snake_case )
UpperCAmelCase_ :Optional[int] = self.spec_out(snake_case )
return spec_out
class DecoderLayer(nn.Module):
'''simple docstring'''
def __init__( self : str , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : str , snake_case : Any , snake_case : Optional[Any] , snake_case : int=1e-6 ):
super().__init__()
UpperCAmelCase_ :Tuple = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=snake_case , d_kv=snake_case , num_heads=snake_case , dropout_rate=snake_case ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=snake_case , d_kv=snake_case , num_heads=snake_case , dropout_rate=snake_case , layer_norm_epsilon=snake_case , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=snake_case , d_ff=snake_case , dropout_rate=snake_case , layer_norm_epsilon=snake_case ) )
def snake_case_ ( self : List[str] , snake_case : Dict , snake_case : Tuple=None , snake_case : int=None , snake_case : Dict=None , snake_case : Dict=None , snake_case : Optional[int]=None , ):
UpperCAmelCase_ :str = self.layer[0](
snake_case , conditioning_emb=snake_case , attention_mask=snake_case , )
if encoder_hidden_states is not None:
UpperCAmelCase_ :Dict = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
UpperCAmelCase_ :str = self.layer[1](
snake_case , key_value_states=snake_case , attention_mask=snake_case , )
# Apply Film Conditional Feed Forward layer
UpperCAmelCase_ :Union[str, Any] = self.layer[-1](snake_case , snake_case )
return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
'''simple docstring'''
def __init__( self : Dict , snake_case : Dict , snake_case : str , snake_case : int , snake_case : Union[str, Any] ):
super().__init__()
UpperCAmelCase_ :List[Any] = TaLayerNorm(snake_case )
UpperCAmelCase_ :str = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case )
UpperCAmelCase_ :Optional[int] = Attention(query_dim=snake_case , heads=snake_case , dim_head=snake_case , out_bias=snake_case , scale_qk=snake_case )
UpperCAmelCase_ :Dict = nn.Dropout(snake_case )
def snake_case_ ( self : Any , snake_case : Tuple , snake_case : Tuple=None , snake_case : List[Any]=None , ):
# pre_self_attention_layer_norm
UpperCAmelCase_ :List[Any] = self.layer_norm(snake_case )
if conditioning_emb is not None:
UpperCAmelCase_ :Optional[int] = self.FiLMLayer(snake_case , snake_case )
# Self-attention block
UpperCAmelCase_ :Any = self.attention(snake_case )
UpperCAmelCase_ :str = hidden_states + self.dropout(snake_case )
return hidden_states
class TaLayerCrossAttention(nn.Module):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case : List[Any] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Tuple , snake_case : Optional[int] ):
super().__init__()
UpperCAmelCase_ :List[Any] = Attention(query_dim=snake_case , heads=snake_case , dim_head=snake_case , out_bias=snake_case , scale_qk=snake_case )
UpperCAmelCase_ :Optional[int] = TaLayerNorm(snake_case , eps=snake_case )
UpperCAmelCase_ :int = nn.Dropout(snake_case )
def snake_case_ ( self : str , snake_case : List[str] , snake_case : List[str]=None , snake_case : str=None , ):
UpperCAmelCase_ :List[Any] = self.layer_norm(snake_case )
UpperCAmelCase_ :Any = self.attention(
snake_case , encoder_hidden_states=snake_case , attention_mask=attention_mask.squeeze(1 ) , )
UpperCAmelCase_ :str = hidden_states + self.dropout(snake_case )
return layer_output
class TaLayerFFCond(nn.Module):
'''simple docstring'''
def __init__( self : int , snake_case : int , snake_case : Optional[Any] , snake_case : Dict , snake_case : Tuple ):
super().__init__()
UpperCAmelCase_ :Dict = TaDenseGatedActDense(d_model=snake_case , d_ff=snake_case , dropout_rate=snake_case )
UpperCAmelCase_ :List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case )
UpperCAmelCase_ :Union[str, Any] = TaLayerNorm(snake_case , eps=snake_case )
UpperCAmelCase_ :Optional[int] = nn.Dropout(snake_case )
def snake_case_ ( self : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Optional[int]=None ):
UpperCAmelCase_ :List[Any] = self.layer_norm(snake_case )
if conditioning_emb is not None:
UpperCAmelCase_ :Tuple = self.film(snake_case , snake_case )
UpperCAmelCase_ :Any = self.DenseReluDense(snake_case )
UpperCAmelCase_ :List[Any] = hidden_states + self.dropout(snake_case )
return hidden_states
class TaDenseGatedActDense(nn.Module):
'''simple docstring'''
def __init__( self : int , snake_case : Optional[int] , snake_case : int , snake_case : str ):
super().__init__()
UpperCAmelCase_ :Any = nn.Linear(snake_case , snake_case , bias=snake_case )
UpperCAmelCase_ :Optional[int] = nn.Linear(snake_case , snake_case , bias=snake_case )
UpperCAmelCase_ :Dict = nn.Linear(snake_case , snake_case , bias=snake_case )
UpperCAmelCase_ :Optional[int] = nn.Dropout(snake_case )
UpperCAmelCase_ :str = NewGELUActivation()
def snake_case_ ( self : int , snake_case : Any ):
UpperCAmelCase_ :List[str] = self.act(self.wi_a(snake_case ) )
UpperCAmelCase_ :str = self.wi_a(snake_case )
UpperCAmelCase_ :str = hidden_gelu * hidden_linear
UpperCAmelCase_ :Any = self.dropout(snake_case )
UpperCAmelCase_ :int = self.wo(snake_case )
return hidden_states
class TaLayerNorm(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case : Tuple , snake_case : Any=1e-6 ):
super().__init__()
UpperCAmelCase_ :List[str] = nn.Parameter(torch.ones(snake_case ) )
UpperCAmelCase_ :List[str] = eps
def snake_case_ ( self : Union[str, Any] , snake_case : Union[str, Any] ):
        # T5 uses a layer norm that only scales and does not shift, also known as Root Mean Square
        # Layer Normalization (https://arxiv.org/abs/1910.07467): the variance is computed without
        # subtracting the mean, and there is no bias. The accumulation for half-precision inputs
        # is done in fp32.
UpperCAmelCase_ :str = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=snake_case )
UpperCAmelCase_ :int = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
UpperCAmelCase_ :List[str] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
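# Editor's note: TaLayerNorm above computes RMSNorm,
#   y = weight * x / sqrt(mean(x**2, axis=-1) + eps)
# with the mean-of-squares accumulated in fp32 before casting back.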
class NewGELUActivation(nn.Module):
'''simple docstring'''
def snake_case_ ( self : int , snake_case : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(snake_case , 3.0 )) ))
class TaFiLMLayer(nn.Module):
'''simple docstring'''
def __init__( self : Any , snake_case : Optional[Any] , snake_case : Tuple ):
super().__init__()
UpperCAmelCase_ :Optional[int] = nn.Linear(snake_case , out_features * 2 , bias=snake_case )
def snake_case_ ( self : Optional[Any] , snake_case : Any , snake_case : Tuple ):
UpperCAmelCase_ :Tuple = self.scale_bias(snake_case )
UpperCAmelCase_ ,UpperCAmelCase_ :Union[str, Any] = torch.chunk(snake_case , 2 , -1 )
UpperCAmelCase_ :List[Any] = x * (1 + scale) + shift
return x
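# Editor's note: TaFiLMLayer above implements feature-wise linear modulation
# (FiLM): y = x * (1 + scale) + shift, where scale and shift are chunked from a
# single linear projection of the conditioning embedding.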
| 608
|
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def a ( __snake_case : Tuple, __snake_case : str ):
'''simple docstring'''
for tf_name, hf_name in patterns:
UpperCAmelCase_ :Optional[int] = k.replace(__snake_case, __snake_case )
return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict):
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''')}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''')}
    for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion'''):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value''']):
            v = v.T  # TF stores these matrices transposed relative to torch
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion'''):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value''']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping['''model.encoder.embed_positions.weight'''] = mapping['''model.embed_positions.weight''']
    mapping['''model.decoder.embed_positions.weight'''] = mapping.pop('''model.embed_positions.weight''')
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            '''final_logits_bias''',
            '''model.encoder.embed_tokens.weight''',
            '''model.decoder.embed_tokens.weight''',
            '''lm_head.weight''',
        ]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path: str):
    '''simple docstring'''
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars, desc='''converting tf checkpoint to dict'''):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
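To make the pattern-based renaming concrete, here is a small trace through `rename_state_dict_key` with the decoder patterns; the sample key below is hypothetical, chosen only to exercise the substitution chain:

sample = "pegasus/decoder/layer_0/attention/self/query/kernel"  # hypothetical TF-style key
print(rename_state_dict_key(sample, DECODER_PATTERNS))
# -> model.decoder.layers.0.self_attn.q_proj.weight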
| 608
| 1
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # keep the hole's parity equal to the outer width's parity
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F'''{solution() = }''')
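As a sanity check on the counting logic, a direct brute-force variant for small limits (a sketch assuming the same lamina definition used above: an outer square minus a centred hole of matching parity; it reuses the module's `defaultdict` import):

def solution_bruteforce(t_limit: int = 1_000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer in range(3, t_limit):
        # the hole must share the outer width's parity so the border is uniform
        for hole in range(1 if outer % 2 else 2, outer - 1, 2):
            tiles = outer * outer - hole * hole
            if tiles <= t_limit:
                count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)

print(solution_bruteforce(1_000) == solution(1_000))  # expected: True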
| 702
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # ground-truth path: emulate accumulation by scaling the loss manually
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
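# Why the manual branch divides by `gradient_accumulation_steps`: averaging the
# gradient over an accumulation window equals backpropagating loss / num_steps
# at every micro-step. Tiny illustration (hypothetical values):
#
#   w = torch.tensor(1.0, requires_grad=True)
#   for micro_loss in (2.0, 4.0):
#       (w * micro_loss / 2).backward()  # 2 == gradient_accumulation_steps
#   w.grad  # tensor(3.) == mean of the per-micro-step gradients (2. and 4.)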
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Prepare the DDP copy of `model` (plus optimizer/scheduler/dataloader when requested)
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP" (manual loss scaling)
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" under the wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync at the end of each accumulation window
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # break into a second dataloader mid-iteration
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ", f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**", )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**", )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
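For reference, a minimal standalone version of the training pattern these tests exercise; a sketch with a toy model and data, runnable on a single process:

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = TensorDataset(torch.randn(8, 1), torch.randn(8, 1))
model, optimizer, dataloader = accelerator.prepare(model, optimizer, DataLoader(dataset, batch_size=2))

for x, y in dataloader:
    with accelerator.accumulate(model):  # gradients sync / step only every 2nd batch
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()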
| 655
| 0
|
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself (e.g. 76**2 == 5776)."""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
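# The first automorphic numbers are 0, 1, 5, 6, 25, 76, 376, 625 (for example,
# 76 * 76 == 5776, which ends in 76). Quick spot checks:
#
#   assert is_automorphic_number(76)
#   assert not is_automorphic_number(26)  # 26 * 26 == 676, which does not end in 26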
if __name__ == "__main__":
import doctest
doctest.testmod()
| 577
|
'''simple docstring'''
import os
def solution():
    """simple docstring"""
    with open(os.path.dirname(__file__) + """/grid.txt""") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
        maximum = 0
        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp
        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp
        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp
        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
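The four direction-specific scans can be collapsed into a single loop over direction vectors; an equivalent sketch (with `grid` standing in for the parsed 20x20 matrix of ints):

def max_product_in_grid(grid, run=4):
    best = 0
    # right, down, diagonal, anti-diagonal
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(20):
            for j in range(20):
                if 0 <= i + di * (run - 1) < 20 and 0 <= j + dj * (run - 1) < 20:
                    product = 1
                    for step in range(run):
                        product *= grid[i + di * step][j + dj * step]
                    best = max(best, product)
    return best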
| 577
| 1
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the module tree to the target parameter/module
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(""".*"""):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(""".*.""")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
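# Illustration of the wildcard handling above: a trailing ".*" matches by
# prefix, while an inner ".*." requires both the prefix and the suffix.
#
#   should_ignore("text_encoder_prenet.encoder_prenet.0", ["text_encoder_prenet.*"])     # True
#   should_ignore("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"])  # True
#   should_ignore("encoder.proj.weight", ["decoder.proj"])                               # False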
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"""Unsupported task: {task}""")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"""{name} was ignored""")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(""".*.""")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"""Unknown task name: {task}""")
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("""<mask>""", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token})
        tokenizer.add_tokens(["""<ctc_blank>"""])
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["""model"""], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("""Pushing to the hub...""")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
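For reference, a typical invocation might look as follows; the script file name and all paths here are placeholders, not verified values:

#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts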
| 713
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ))
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""")
    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ), )
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees its own arguments
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 507
| 0
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
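The `evaluate` exercised above is transformers' restricted Python interpreter. As an independent toy illustration of the same idea (not the transformers implementation), here is a minimal evaluator built on the stdlib `ast` module that handles only name assignments and bare expressions:

import ast

def tiny_evaluate(code: str, tools: dict, state: dict):
    # Evaluate statement by statement; return the value of the last one.
    result = None
    env = {"__builtins__": {}, **tools}  # strip builtins to mimic the restriction
    for node in ast.parse(code).body:
        value = eval(compile(ast.Expression(node.value), "<ast>", "eval"), env, state)
        if isinstance(node, ast.Assign):  # only simple `name = ...` targets
            for target in node.targets:
                state[target.id] = value
        result = value
    return result

state = {}
assert tiny_evaluate("x = 3\ny = add_two(x)", {"add_two": lambda v: v + 2}, state) == 5
assert state == {"x": 3, "y": 5}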
| 524
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(accelerator, dataset_size, batch_size, process_0_expected_batch_sizes, process_1_expected_batch_sizes, ):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
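A minimal standalone sketch of the join pattern exercised above (meaningful only when launched on more than one process; the toy model and data are illustrative):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(even_batches=False)      # don't pad ranks to equal batch counts
model = accelerator.prepare(torch.nn.Linear(1, 1))
dataset = TensorDataset(torch.randn(5, 1))         # 5 samples: uneven across 2 ranks
dataloader = accelerator.prepare(DataLoader(dataset, batch_size=2))

with accelerator.join_uneven_inputs([model]):      # keeps DDP ranks from deadlocking
    for (x,) in dataloader:
        loss = model(x).sum()
        accelerator.backward(loss)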
| 524
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 709
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """simple docstring"""
    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True, ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def A__ ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def A__ ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def A__ ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def A__ ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def A__ ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def A__ ( self ):
pass
@unittest.skip('Safetensors is not supported by timm.' )
def A__ ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A__ ( self ):
pass
    def test_forward_signature(self):
__lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
    def test_retain_grad_hidden_states_attentions(self):
__lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCAmelCase = self.all_model_classes[0]
__lowerCAmelCase = model_class(_A )
model.to(_A )
__lowerCAmelCase = self._prepare_for_class(_A , _A )
__lowerCAmelCase = model(**_A )
__lowerCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def A__ ( self ):
__lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(**_A )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCAmelCase = copy.deepcopy(_A )
__lowerCAmelCase = None
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(**_A )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowerCAmelCase = copy.deepcopy(_A )
__lowerCAmelCase = False
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(**_A )
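A standalone sketch of the equivalence that `test_timm_transformer_backbone_equivalence` asserts (downloads weights on first run; the checkpoint names come from the test itself):

from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
print(timm_backbone.channels == hf_backbone.channels)  # expected: True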
| 102
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
def lowerCAmelCase (self : int , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Optional[int] , ):
__a : str = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
__a : Optional[Any] = get_resize_output_image_size(snake_case_ , size=size['''shortest_edge'''] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase (self : Optional[Any] , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Any , ):
__a : Union[str, Any] = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(snake_case_ , size=(size['''height'''], size['''width''']) , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase (self : Any , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : str , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase (self : List[str] , snake_case_ : np.ndarray , snake_case_ : Optional[Union[str, ChannelDimension]] = None ):
return flip_channel_order(snake_case_ , data_format=snake_case_ )
def lowerCAmelCase (self : List[str] , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : bool = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : Union[str, Any] , ):
__a : Dict = do_resize if do_resize is not None else self.do_resize
__a : List[Any] = resample if resample is not None else self.resample
__a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__a : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__a : int = do_center_crop if do_center_crop is not None else self.do_center_crop
__a : int = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
__a : List[str] = size if size is not None else self.size
__a : int = get_size_dict(snake_case_ , default_to_square=snake_case_ )
__a : int = crop_size if crop_size is not None else self.crop_size
__a : Any = get_size_dict(snake_case_ , param_name='''crop_size''' )
__a : Dict = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
__a : Optional[int] = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
__a : Any = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
__a : str = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
__a : List[Any] = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__a : Dict = [self.flip_channel_order(image=snake_case_ ) for image in images]
__a : List[str] = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
__a : str = {'''pixel_values''': images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
def lowerCAmelCase (self : Optional[int] , snake_case_ : List[str] , snake_case_ : List[Tuple] = None ):
__a : Optional[int] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(snake_case_ ):
__a : Optional[int] = target_sizes.numpy()
__a : int = []
for idx in range(len(snake_case_ ) ):
__a : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=snake_case_ )
__a : int = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case_ )
else:
__a : List[str] = logits.argmax(dim=1 )
__a : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
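# --- Hedged usage sketch (illustrative, not the class above) ---
# The preprocessing order implemented above is resize -> center crop ->
# rescale -> BGR channel flip -> channels-first. A plain-numpy imitation of
# the last three steps; `toy_preprocess` is an assumed helper name.
import numpy as np

def toy_preprocess(image, rescale_factor=1 / 255):
    image = image.astype(np.float32) * rescale_factor  # rescale to [0, 1]
    image = image[..., ::-1]                           # RGB -> BGR flip
    return np.transpose(image, (2, 0, 1))              # HWC -> CHW

_pixel_values = toy_preprocess(np.zeros((256, 256, 3), dtype=np.uint8))
assert _pixel_values.shape == (3, 256, 256)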
| 521
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
lowercase__ =False
try:
lowercase__ =_is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class UpperCamelCase__ :
def __init__(self : Dict , snake_case_ : str = None , snake_case_ : list = [] ):
__a : List[Any] = 0
__a : Optional[int] = choices
__a : int = prompt
if sys.platform == "win32":
__a : int = '''*'''
else:
__a : List[Any] = '''➔ '''
def lowerCAmelCase (self : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 3_2 , snake_case_ )
else:
forceWrite(self.choices[index] , snake_case_ )
def lowerCAmelCase (self : str , snake_case_ : int ):
if index == self.position:
forceWrite(f" {self.arrow_char} " )
self.write_choice(snake_case_ )
else:
forceWrite(f" {self.choices[index]}" )
reset_cursor()
def lowerCAmelCase (self : Any , snake_case_ : Direction , snake_case_ : int = 1 ):
__a : Dict = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(snake_case_ )
move_cursor(snake_case_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def lowerCAmelCase (self : Optional[Any] ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def lowerCAmelCase (self : List[str] ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def lowerCAmelCase (self : List[Any] ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def lowerCAmelCase (self : int ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number )] for number in range(1_0 )] )
def lowerCAmelCase (self : int ):
__a : str = int(chr(self.current_selection ) )
__a : Any = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , snake_case_ )
else:
return
else:
return
def lowerCAmelCase (self : List[Any] , snake_case_ : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
__a : Dict = default_choice
for i in range(len(self.choices ) ):
self.print_choice(snake_case_ )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
__a : str = int(builtins.input() )
except ValueError:
__a : Dict = default_choice
else:
__a : Union[str, Any] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(snake_case_ , '''\n''' )
return choice
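# --- Hedged sketch (illustrative, not the menu class above) ---
# The move_direction method above clamps the cursor: moving past the first
# or last choice is a no-op. The same rule in isolation:
def clamp_move(position, delta, n_choices):
    new_position = position + delta
    if new_position < 0 or new_position >= n_choices:
        return position  # out of range: stay put
    return new_position

assert clamp_move(0, -1, 3) == 0  # can't move above the first choice
assert clamp_move(2, +1, 3) == 2  # can't move below the last choice
assert clamp_move(1, +1, 3) == 2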
| 521
| 1
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
snake_case__ : str = """scheduler_config.json"""
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Union[str, Any] = 1
_snake_case : int = 2
_snake_case : Optional[int] = 3
_snake_case : Optional[int] = 4
_snake_case : int = 5
@dataclass
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : jnp.ndarray
class _A :
'''simple docstring'''
_snake_case : Optional[int] = SCHEDULER_CONFIG_NAME
_snake_case : Dict = ["""dtype"""]
_snake_case : Dict = []
_snake_case : Union[str, Any] = True
@classmethod
def _snake_case ( cls : Dict , lowerCamelCase : Dict[str, Any] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[Any]=False , **lowerCamelCase : Union[str, Any] , ):
'''simple docstring'''
__lowercase , __lowercase = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase , subfolder=lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase , )
__lowercase , __lowercase = cls.from_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase )
if hasattr(lowerCamelCase , "create_state" ) and getattr(lowerCamelCase , "has_state" , lowerCamelCase ):
__lowercase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def _snake_case ( self : List[str] , lowerCamelCase : Union[str, os.PathLike] , lowerCamelCase : bool = False , **lowerCamelCase : List[str] ):
'''simple docstring'''
self.save_config(save_directory=lowerCamelCase , push_to_hub=lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : int ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def _snake_case ( cls : Union[str, Any] ):
'''simple docstring'''
__lowercase = list(set([cls.__name__] + cls._compatibles ) )
__lowercase = importlib.import_module(__name__.split("." )[0] )
__lowercase = [
getattr(lowerCamelCase , lowerCamelCase ) for c in compatible_classes_str if hasattr(lowerCamelCase , lowerCamelCase )
]
return compatible_classes
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert len(_SCREAMING_SNAKE_CASE ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_SCREAMING_SNAKE_CASE ) - x.ndim) ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.9_9_9 , _SCREAMING_SNAKE_CASE=jnp.floataa ):
def alpha_bar(_SCREAMING_SNAKE_CASE ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
__lowercase = []
for i in range(_SCREAMING_SNAKE_CASE ):
__lowercase = i / num_diffusion_timesteps
__lowercase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_SCREAMING_SNAKE_CASE ) / alpha_bar(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return jnp.array(_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
@flax.struct.dataclass
class _A :
'''simple docstring'''
_snake_case : jnp.ndarray
_snake_case : jnp.ndarray
_snake_case : jnp.ndarray
@classmethod
def _snake_case ( cls : str , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = scheduler.config
if config.trained_betas is not None:
__lowercase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__lowercase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowercase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowercase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
__lowercase = 1.0 - betas
__lowercase = jnp.cumprod(lowerCamelCase , axis=0 )
return cls(
alphas=lowerCamelCase , betas=lowerCamelCase , alphas_cumprod=lowerCamelCase , )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = state.alphas_cumprod
__lowercase = alphas_cumprod[timesteps] ** 0.5
__lowercase = sqrt_alpha_prod.flatten()
__lowercase = broadcast_to_shape_from_left(_SCREAMING_SNAKE_CASE , original_samples.shape )
__lowercase = (1 - alphas_cumprod[timesteps]) ** 0.5
__lowercase = sqrt_one_minus_alpha_prod.flatten()
__lowercase = broadcast_to_shape_from_left(_SCREAMING_SNAKE_CASE , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = get_sqrt_alpha_prod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase = get_sqrt_alpha_prod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
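# --- Hedged numeric check (illustrative, plain numpy) ---
# The forward-noising rule implemented above is
#   noisy = sqrt(alphabar_t) * x0 + sqrt(1 - alphabar_t) * noise.
# With zero noise the sample is just scaled by sqrt(alphabar_t):
import numpy as np

_betas = np.linspace(1e-4, 2e-2, 10)
_alphas_cumprod = np.cumprod(1.0 - _betas)
_t = 5
_x0, _noise = np.ones(4), np.zeros(4)
_noisy = np.sqrt(_alphas_cumprod[_t]) * _x0 + np.sqrt(1.0 - _alphas_cumprod[_t]) * _noise
assert np.allclose(_noisy, np.sqrt(_alphas_cumprod[_t]))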
| 720
|
from __future__ import annotations
import bisect
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
if hi < 0:
__lowercase = len(_SCREAMING_SNAKE_CASE )
while lo < hi:
__lowercase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowercase = mid + 1
else:
__lowercase = mid
return lo
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = -1 ):
sorted_collection.insert(bisect_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = 0
__lowercase = len(_SCREAMING_SNAKE_CASE ) - 1
while left <= right:
__lowercase = left + (right - left) // 2
__lowercase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowercase = midpoint - 1
else:
__lowercase = midpoint + 1
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = bisect.bisect_left(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if index != len(_SCREAMING_SNAKE_CASE ) and sorted_collection[index] == item:
return index
return None
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if right < left:
return None
__lowercase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint - 1 )
else:
return binary_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , midpoint + 1 , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case__ : Optional[Any] = input("""Enter numbers separated by comma:\n""").strip()
snake_case__ : Any = sorted(int(item) for item in user_input.split(""","""))
snake_case__ : Any = int(input("""Enter a single number to be found in the list:\n"""))
snake_case__ : List[Any] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
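# --- Hedged cross-check against the standard library ---
# The two loop variants above implement the left/right boundary convention:
# for duplicates, the "left" variant returns the first matching index and
# the "right" variant returns one past the last.
import bisect

_data = [1, 2, 2, 3]
assert bisect.bisect_left(_data, 2) == 1   # first position of 2
assert bisect.bisect_right(_data, 2) == 3  # one past the last 2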
| 655
| 0
|
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int ):
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise TypeError("Input value must be an 'int' type" )
snake_case_ : Dict = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
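# --- Hedged check (illustrative) ---
# The shift loop above counts how many right-shifts empty the number,
# i.e. the 1-based position of the highest set bit, matching int.bit_length():
assert (8).bit_length() == 4  # 0b1000
assert (1).bit_length() == 1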
| 666
|
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int = 1_0_0_0 ):
snake_case_ ,snake_case_ : List[str] = 1, 1
snake_case_ : List[str] = 2
while True:
snake_case_ : Tuple = 0
snake_case_ : Union[str, Any] = fa + fa
snake_case_ ,snake_case_ : str = fa, f
index += 1
for _ in str(lowerCAmelCase_ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
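# --- Hedged restatement (illustrative, not the function above) ---
# A clean version of the computation: the index of the first Fibonacci
# number with n digits, with F(1) = F(2) = 1.
def first_fib_index_with_n_digits(n=1000):
    fa, fb, index = 1, 1, 2
    while len(str(fb)) < n:
        fa, fb = fb, fa + fb
        index += 1
    return index

assert first_fib_index_with_n_digits(3) == 12  # F(12) = 144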
| 666
| 1
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
# A mock response for an HTTP head request to emulate server down
_lowercase =mock.Mock()
_lowercase =500
_lowercase ={}
_lowercase =HTTPError
_lowercase ={}
# Download this model to make sure it's in the cache.
_lowercase =WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase_ ) as mock_head:
_lowercase =WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This checks that we did call the fake head request
mock_head.assert_called()
def __lowerCAmelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
_lowercase =WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowerCAmelCase ( cls ):
_lowercase =TOKEN
HfFolder.save_token(lowerCAmelCase_ )
@classmethod
def __lowerCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def __lowerCAmelCase ( self ):
_lowercase =WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
_lowercase =WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase_ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
_lowercase =WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
def __lowerCAmelCase ( self ):
_lowercase =WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
_lowercase =WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase_ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
_lowercase =WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
def __lowerCAmelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
_lowercase =CustomFeatureExtractor.from_pretrained(lowerCAmelCase_ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
_lowercase =AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=lowerCAmelCase_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 594
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def __lowerCamelCase ( __a : List[Any] , __a : Dict , __a : Optional[Any]=None , __a : List[Any]=None ) -> str:
if attention_mask is None:
_lowercase =tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _a :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = OPTConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 'gelu'
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=99 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=4 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=20 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=16 , lowerCAmelCase_=16 , ):
_lowercase =parent
_lowercase =batch_size
_lowercase =seq_length
_lowercase =is_training
_lowercase =use_labels
_lowercase =vocab_size
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =eos_token_id
_lowercase =pad_token_id
_lowercase =bos_token_id
_lowercase =embed_dim
_lowercase =word_embed_proj_dim
_lowercase =False
def __lowerCAmelCase ( self ):
_lowercase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowercase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowercase =tf.concat([input_ids, eos_tensor] , axis=1 )
_lowercase =self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
_lowercase =prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =TFOPTModel(config=lowerCAmelCase_ )
_lowercase =inputs_dict["input_ids"]
_lowercase =input_ids[:1, :]
_lowercase =inputs_dict["attention_mask"][:1, :]
_lowercase =1
# first forward pass
_lowercase =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_lowercase , _lowercase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
_lowercase =tf.concat([input_ids, next_tokens] , axis=-1 )
_lowercase =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowercase =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_lowercase =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowercase =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowercase =output_from_no_past[:, -3:, random_slice_idx]
_lowercase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 )
@require_tf
class _a ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFOPTForCausalLM,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = 10
def __lowerCAmelCase ( self ):
_lowercase =TFOPTModelTester(self )
_lowercase =ConfigTester(self , config_class=lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase_ , lowerCAmelCase_ ):
if hasattr(lowerCAmelCase_ , "weight" ):
return embedding_layer.weight
else:
# Build the word embedding weights if they don't exist yet,
# then retry fetching the attribute once built.
model.build()
if hasattr(lowerCAmelCase_ , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_lowercase =model_class(config=lowerCAmelCase_ )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_lowercase =size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
_lowercase =True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowercase =False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
_lowercase =True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowercase =False
self.assertTrue(lowerCAmelCase_ )
def __lowerCamelCase ( __a : Tuple ) -> Dict:
return tf.constant(__a , dtype=tf.intaa )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 99
def __lowerCAmelCase ( self ):
_lowercase =tf.ones((4, 1) , dtype=tf.intaa ) * 2
_lowercase =tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_lowercase =input_ids.shape[0]
_lowercase =OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self ):
_lowercase =TFOPTModel.from_pretrained("facebook/opt-350m" )
_lowercase =_long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowercase =tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
_lowercase =model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
_lowercase =(1, 11, 512)
self.assertEqual(output.shape , lowerCAmelCase_ )
_lowercase =tf.constant(
[[-0.2_8_7_3, -1.9_2_1_8, -0.3_0_3_3], [-1.2_7_1_0, -0.1_3_3_8, -0.1_9_0_2], [0.4_0_9_5, 0.1_2_1_4, -1.3_1_2_1]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
_lowercase =tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
_lowercase =xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
super().setUp()
_lowercase ="facebook/opt-350m"
def __lowerCAmelCase ( self ):
_lowercase =TFOPTForCausalLM.from_pretrained(self.path_model )
_lowercase =GPTaTokenizer.from_pretrained(self.path_model )
_lowercase =[
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_lowercase =tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_lowercase =tf.constant(
[
[1.3_8_5_1, -1_3.8_9_2_3, -1_0.5_2_2_9, -1_0.7_5_3_3, -0.2_3_0_9, -1_0.2_3_8_4, -0.5_3_6_5, -9.0_9_4_7, -5.1_6_7_0],
[-4.7_0_7_3, -1_0.6_2_7_6, -3.9_4_1_5, -2_1.5_2_4_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2],
[0.6_2_4_7, -3.4_2_2_9, -8.9_1_7_9, -1.4_2_9_7, -1_4.1_6_5_0, 1.4_1_4_6, -9.0_2_1_8, -0.2_7_0_3, -0.2_7_0_3],
[6.4_7_8_3, -1.9_9_1_3, -1_0.7_9_2_6, -2.3_3_3_6, 1.5_0_9_2, -0.9_9_7_4, -6.8_2_1_3, 1.3_4_7_7, 1.3_4_7_7],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
_lowercase =tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
_lowercase =tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __lowerCAmelCase ( self ):
_lowercase ="facebook/opt-125m"
_lowercase =[
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_lowercase =[]
_lowercase =GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_lowercase =TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" ).input_ids
_lowercase =model.generate(lowerCAmelCase_ , max_length=10 )
_lowercase =tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase ="facebook/opt-350m"
_lowercase =GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_lowercase =TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
_lowercase ="left"
# use different length sentences to test batching
_lowercase =[
"Hello, my dog is a little",
"Today, I",
]
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" , padding=lowerCAmelCase_ )
_lowercase =inputs["input_ids"]
_lowercase =model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs["attention_mask"] )
_lowercase =tokenizer(sentences[0] , return_tensors="tf" ).input_ids
_lowercase =model.generate(input_ids=lowerCAmelCase_ )
_lowercase =inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
_lowercase =tokenizer(sentences[1] , return_tensors="tf" ).input_ids
_lowercase =model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
_lowercase =tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
_lowercase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
_lowercase =tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
_lowercase =[
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def __lowerCAmelCase ( self ):
_lowercase ="facebook/opt-350m"
_lowercase =[
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_lowercase =[]
_lowercase =GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_lowercase =TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" ).input_ids
_lowercase =model.generate(lowerCAmelCase_ , max_length=10 )
_lowercase =tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
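# --- Hedged illustration (plain Python, not the TF test above) ---
# Why the batching test sets padding_side = "left": a causal LM generates
# from the last position of each row, so that position must hold a real
# token, not padding.
_PAD = 0
_right_padded = [[7, 8, 9], [4, _PAD, _PAD]]
_left_padded = [[7, 8, 9], [_PAD, _PAD, 4]]
assert _right_padded[1][-1] == _PAD  # last slot is padding: bad for generation
assert _left_padded[1][-1] != _PAD   # last slot is a real token: good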
| 594
| 1
|
def A ( ) -> Optional[Any]:
UpperCamelCase__ :int = 0
for i in range(1 , 1001 ):
total += i**i
return str(total )[-10:]
if __name__ == "__main__":
print(solution())
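# --- Hedged sanity check on a small case ---
# The series summed above is the self powers 1**1 + 2**2 + ...;
# for the first three terms, 1 + 4 + 27 = 32.
assert sum(i**i for i in range(1, 4)) == 32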
| 45
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowercase = ["""text""", """image""", """audio"""]
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ):
'''simple docstring'''
UpperCamelCase__ = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(UpperCamelCase__, UpperCamelCase__ ):
inputs.append(create_inputs(UpperCamelCase__ ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def lowerCamelCase_ ( UpperCamelCase__ : List ):
'''simple docstring'''
UpperCamelCase__ = []
for output in outputs:
if isinstance(UpperCamelCase__, (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(UpperCamelCase__, (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(UpperCamelCase__, (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class __lowercase :
'''simple docstring'''
def A_ ( self : List[str] ):
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
UpperCamelCase__ = self.tool.inputs
for _input in inputs:
if isinstance(_input , _a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCamelCase__ = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def A_ ( self : str ):
UpperCamelCase__ = create_inputs(self.tool.inputs )
UpperCamelCase__ = self.tool(*_a )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCamelCase__ = [outputs]
self.assertListEqual(output_types(_a ) , self.tool.outputs )
def A_ ( self : List[str] ):
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def A_ ( self : List[str] ):
UpperCamelCase__ = create_inputs(self.tool.inputs )
UpperCamelCase__ = self.tool(*_a )
if not isinstance(_a , _a ):
UpperCamelCase__ = [outputs]
self.assertEqual(len(_a ) , len(self.tool.outputs ) )
for output, output_type in zip(_a , self.tool.outputs ):
UpperCamelCase__ = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_a , _a ) )
def A_ ( self : Optional[int] ):
UpperCamelCase__ = create_inputs(self.tool.inputs )
UpperCamelCase__ = []
for _input, input_type in zip(_a , self.tool.inputs ):
if isinstance(_a , _a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCamelCase__ = self.tool(*_a )
if not isinstance(_a , _a ):
UpperCamelCase__ = [outputs]
self.assertEqual(len(_a ) , len(self.tool.outputs ) )
| 240
| 0
|
'''simple docstring'''
def snake_case ( a_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
UpperCamelCase_ : str = set()
# Replace all the whitespace in our sentence
UpperCamelCase_ : Dict = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(a_ ) == 26
def snake_case ( a_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
UpperCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
UpperCamelCase_ : Dict = True
elif char.isupper():
UpperCamelCase_ : Optional[int] = True
return all(a_ )
def snake_case ( a_ : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def snake_case ( ) -> None:
"""simple docstring"""
from timeit import timeit
UpperCamelCase_ : Dict = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=a_ ) )
print(timeit("""is_pangram_faster()""" , setup=a_ ) )
print(timeit("""is_pangram_fastest()""" , setup=a_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
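# --- Hedged sanity check of the set-based predicate above ---
assert len({c for c in "The quick brown fox jumps over the lazy dog".lower() if c.isalpha()}) == 26
assert len({c for c in "hello world".lower() if c.isalpha()}) != 26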
| 543
|
'''simple docstring'''
from itertools import count
def snake_case ( a_ : int = 50 ) -> int:
"""simple docstring"""
UpperCamelCase_ : int = [1] * min_block_length
for n in count(a_ ):
fill_count_functions.append(1 )
for block_length in range(a_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_000_000:
break
return n
if __name__ == "__main__":
print(f"{solution() = }")
| 543
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=_lowercase ):
"""simple docstring"""
snake_case = ["torch", "scipy"]
def __init__( self : Optional[Any] , *_snake_case : List[Any] , **_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch", "scipy"] )
@classmethod
def lowerCamelCase__ ( cls : Dict , *_snake_case : Any , **_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def lowerCamelCase__ ( cls : List[str] , *_snake_case : str , **_snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"] )
| 115
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase_ : Dict = None
UpperCamelCase_ : int = logging.get_logger(__name__)
UpperCamelCase_ : str = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase_ : List[Any] = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ : Optional[Any] = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
UpperCamelCase_ : str = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = ["input_ids", "attention_mask"]
snake_case = MBartTokenizer
snake_case = []
snake_case = []
def __init__( self : List[str] , _snake_case : Tuple=None , _snake_case : int=None , _snake_case : List[Any]="<s>" , _snake_case : Tuple="</s>" , _snake_case : str="</s>" , _snake_case : List[Any]="<s>" , _snake_case : Dict="<unk>" , _snake_case : str="<pad>" , _snake_case : Any="<mask>" , _snake_case : int=None , _snake_case : Optional[int]=None , _snake_case : Any=None , **_snake_case : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
# Mask token behaves like a normal word, i.e. includes the space before it
A_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
vocab_file=_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , **_snake_case , )
A_ = vocab_file
A_ = False if not self.vocab_file else True
A_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
A_ = {
lang_code: self.convert_tokens_to_ids(_snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ = src_lang if src_lang is not None else "en_XX"
A_ = self.convert_tokens_to_ids(self._src_lang )
A_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase__ ( self : Dict ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self : Tuple , _snake_case : str ) -> None:
"""simple docstring"""
A_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__ ( self : Optional[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : List[Any] , _snake_case : str , _snake_case : str , _snake_case : Optional[str] , _snake_case : Optional[str] , **_snake_case : Optional[int] ) -> str:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
A_ = src_lang
A_ = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case )
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = tgt_lang_id
return inputs
def lowerCamelCase__ ( self : Dict , _snake_case : List[str] , _snake_case : str = "en_XX" , _snake_case : Optional[List[str]] = None , _snake_case : str = "ro_RO" , **_snake_case : str , ) -> BatchEncoding:
"""simple docstring"""
A_ = src_lang
A_ = tgt_lang
return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self : Tuple , _snake_case : List[str] ) -> None:
"""simple docstring"""
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase__ ( self : List[str] , _snake_case : str ) -> None:
"""simple docstring"""
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : str , _snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
A_ = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
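# --- Hedged illustration of the mBART sequence layout built above ---
# set_src_lang_special_tokens puts the specials as a suffix:
# tokens + </s> + <lang_code>. The token strings here are illustrative.
_prefix_tokens = []
_suffix_tokens = ["</s>", "en_XX"]
assert _prefix_tokens + ["Hello", "world"] + _suffix_tokens == [
    "Hello", "world", "</s>", "en_XX"
]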
| 115
| 1
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def lowerCamelCase ( SCREAMING_SNAKE_CASE = 3 ):
if not isinstance(SCREAMING_SNAKE_CASE , int ):
raise TypeError('''number of qubits must be an integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(SCREAMING_SNAKE_CASE ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
__UpperCamelCase :Tuple = QuantumRegister(SCREAMING_SNAKE_CASE , '''qr''' )
__UpperCamelCase :Dict = ClassicalRegister(SCREAMING_SNAKE_CASE , '''cr''' )
__UpperCamelCase :Union[str, Any] = QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Union[str, Any] = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(SCREAMING_SNAKE_CASE ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(SCREAMING_SNAKE_CASE , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# simulate with 10000 shots
__UpperCamelCase :str = Aer.get_backend('''qasm_simulator''' )
__UpperCamelCase :Union[str, Any] = execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=10_000 )
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(
F'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
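# --- Hedged classical analogue (plain numpy, not qiskit) ---
# The circuit above implements the DFT on 2**n amplitudes; the DFT matrix
# is unitary, checked here for n = 2.
import numpy as np

_n = 2
_N = 2 ** _n
_omega = np.exp(2j * np.pi / _N)
_F = np.array([[_omega ** (j * k) for k in range(_N)] for j in range(_N)]) / np.sqrt(_N)
assert np.allclose(_F @ _F.conj().T, np.eye(_N))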
| 702
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Dict = """unispeech"""
def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="mean" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=80 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=0.5 , **__lowercase , ) -> Optional[int]:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
__UpperCamelCase :str = hidden_size
__UpperCamelCase :List[str] = feat_extract_norm
__UpperCamelCase :str = feat_extract_activation
__UpperCamelCase :str = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :Any = conv_bias
__UpperCamelCase :List[Any] = num_conv_pos_embeddings
__UpperCamelCase :Tuple = num_conv_pos_embedding_groups
__UpperCamelCase :Optional[int] = len(self.conv_dim)
__UpperCamelCase :Optional[int] = num_hidden_layers
__UpperCamelCase :Union[str, Any] = intermediate_size
__UpperCamelCase :Tuple = hidden_act
__UpperCamelCase :Optional[int] = num_attention_heads
__UpperCamelCase :Any = hidden_dropout
__UpperCamelCase :List[str] = attention_dropout
__UpperCamelCase :int = activation_dropout
__UpperCamelCase :int = feat_proj_dropout
__UpperCamelCase :Any = final_dropout
__UpperCamelCase :Optional[Any] = layerdrop
__UpperCamelCase :Any = layer_norm_eps
__UpperCamelCase :List[str] = initializer_range
__UpperCamelCase :Tuple = num_ctc_classes
__UpperCamelCase :Union[str, Any] = vocab_size
__UpperCamelCase :List[Any] = do_stable_layer_norm
__UpperCamelCase :Dict = use_weighted_layer_sum
__UpperCamelCase :str = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase :List[Any] = apply_spec_augment
__UpperCamelCase :Optional[int] = mask_time_prob
__UpperCamelCase :int = mask_time_length
__UpperCamelCase :Any = mask_time_min_masks
__UpperCamelCase :Any = mask_feature_prob
__UpperCamelCase :str = mask_feature_length
__UpperCamelCase :Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCamelCase :Optional[Any] = num_codevectors_per_group
__UpperCamelCase :Dict = num_codevector_groups
__UpperCamelCase :Optional[int] = contrastive_logits_temperature
__UpperCamelCase :Union[str, Any] = feat_quantizer_dropout
__UpperCamelCase :List[str] = num_negatives
__UpperCamelCase :Union[str, Any] = codevector_dim
__UpperCamelCase :int = proj_codevector_dim
__UpperCamelCase :Tuple = diversity_loss_weight
# ctc loss
__UpperCamelCase :List[Any] = ctc_loss_reduction
__UpperCamelCase :int = ctc_zero_infinity
# pretraining loss
__UpperCamelCase :Optional[Any] = replace_prob
@property
def UpperCamelCase__ ( self) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1)
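# --- Hedged check of the property above ---
# The overall feature-extractor downsampling is the product of the conv
# strides; for the default (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320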
| 452
| 0
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def __A ( a_ : Any = 1_00_00_00 , a_ : Dict = 10 )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : defaultdict = defaultdict(int )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
SCREAMING_SNAKE_CASE : Dict = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
SCREAMING_SNAKE_CASE : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(a__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f'''{solution() = }''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
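# Note on the pattern above (descriptive comment, not part of the original file):
# assigning a `_LazyModule` to `sys.modules[__name__]` defers the import of heavy
# submodules such as `modeling_llama`, so `from transformers.models.llama import
# LlamaConfig` stays cheap until a torch-backed symbol like `LlamaModel` is first
# accessed.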
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        """Forwards `images` to TvltImageProcessor and `audio` to TvltFeatureExtractor, then merges the outputs."""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
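# Usage sketch (an assumption-laden illustration, not part of the original module;
# the `ZinengTang/tvlt-base` checkpoint name and the random placeholder inputs are
# only examples):
#
#     import numpy as np
#     from transformers import TvltProcessor
#
#     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#     video = list(np.random.rand(8, 3, 224, 224))  # 8 RGB frames
#     audio = list(np.random.rand(10_000))          # raw waveform
#     inputs = processor(images=video, audio=audio, sampling_rate=44_100)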
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1_024 , coordinate_size=128 , shape_size=128 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=128 , rel_2d_pos_bins=64 , max_rel_2d_pos=256 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=224 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence
        # classification and the remaining tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs( self , processor: "ProcessorMixin" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , num_channels: int = 3 , image_width: int = 40 , image_height: int = 40 , ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, don't allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which would result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
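# Usage sketch (illustrative, not part of the original module; assumes a running
# SparkSession named `spark`): this builder is what backs `Dataset.from_spark`.
#
#     from datasets import Dataset
#     df = spark.createDataFrame([("a", 1), ("b", 2)], ["text", "label"])
#     ds = Dataset.from_spark(df)  # materializes the DataFrame into Arrow shards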
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, so the next row does not alias the one being overwritten
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
'''simple docstring'''
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
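# A minimal matching server for the client above (an illustrative sketch using the
# same hard-coded port; run it in a separate process before starting the client):
#
#     import socket
#
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind((socket.gethostname(), 12_312))
#     server.listen(1)
#     conn, _ = server.accept()
#     print(conn.recv(1024))  # b"Hello server!"
#     with open("file_to_send", "rb") as in_file:
#         conn.sendall(in_file.read())
#     conn.close()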
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex number
    constituted by this x-y-pair diverges. Members of the Mandelbrot set do not
    diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black & white color-coding that ignores the relative distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding that takes the relative distance into account."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Create an image of the Mandelbrot set."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
"""simple docstring"""
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
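# Sketch of how a pin table like this is typically consumed (the `deps_list` helper
# below is hypothetical, shown only for illustration):
#
#     def deps_list(*pkgs):
#         return [deps[pkg] for pkg in pkgs]
#
#     install_requires = deps_list("Pillow", "numpy", "requests")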
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_plbart'''] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_plbart'''] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 148
|
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
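# Illustrative usage sketch (hypothetical sample data): quick_select returns the
# element that would land at position `index` if the list were sorted.
if __name__ == "__main__":
    sample = [7, 2, 9, 4, 1]
    print(quick_select(sample, 2))  # prints 4, since sorted(sample)[2] == 4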
| 148
| 1
|
import os
def solution(filename: str = "input.txt") -> int:
    """
    Find the minimal path sum from the left column to the right column of the
    matrix in `filename`, moving up, down, and right.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(''',''')]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F'{solution() = }')
| 592
|
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if num1 and num2 have opposite signs, using the sign bit of XOR.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
| 0
|
def or_gate(input_1: int, input_2: int) -> int:
    '''Calculate the OR of two binary inputs.'''
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    '''Tests the or_gate function.'''
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 700
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
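# Illustrative usage sketch (assuming these helpers behave as in accelerate's own
# test suite; the test function below is hypothetical):
#
#     @require_cuda
#     @slow
#     def test_regression_on_gpu():
#         model = RegressionModel()
#         ...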
| 638
| 0
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """A second lock on the same file must time out while the first is held."""
    lock_1 = FileLock(str(tmpdir / """foo.lock"""))
    lock_2 = FileLock(str(tmpdir / """foo.lock"""))
    timeout = 0.01
    with lock_1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """Overlong lock-file names are shortened to a filesystem-safe length."""
    filename = """a""" * 1000 + """.lock"""
    lock_1 = FileLock(str(tmpdir / filename))
    assert lock_1._lock_file.endswith(""".lock""")
    assert not lock_1._lock_file.endswith(filename)
    assert len(os.path.basename(lock_1._lock_file)) <= 255
    lock_2 = FileLock(tmpdir / filename)
    with lock_1.acquire():
        with pytest.raises(Timeout):
            lock_2.acquire(0)
| 591
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    """Snapshot time, CPU RAM, and per-GPU allocated memory before a workload."""
    # Time
    measures = {"""time""": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["""cpu"""] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    """Return elapsed time and memory deltas (in MiB) relative to `start_measures`."""
    # Time
    measures = {"""time""": time.time() - start_measures["""time"""]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["""cpu"""] = (psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**20
    measures["""cpu-peak"""] = (cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    """Pretty-print the measurements returned by `end_measure`."""
    print(F"{description}:")
    print(F"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(F"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[F"{i}-peak"]
        print(F"- GPU {i} peak: {peak:.2f}MiB")
    print(F"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(F"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 591
| 1
|
'''simple docstring'''
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    '''
    Recursive 0/1 knapsack: the best total value achievable using items from
    `index` onward without exceeding `max_weight`.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    '''
    if index == number_of_items:
        return 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 605
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    '''
    Evaluate an arithmetic expression given in postfix (reverse Polish) notation,
    with integer division rounded toward zero.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    '''
    if not postfix_notation:
        return 0

    operations = {"""+""", """-""", """*""", """/"""}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 605
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name: str) -> str:
    '''Map an original GroupViT checkpoint key to the Transformers naming scheme.'''
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            # NOTE: the target key names below are reconstructed to match the renaming
            # scheme of `rename_key`; verify against the published HF GroupViT weights.
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    '''Copy/paste/tweak the original GroupViT checkpoint into the Transformers design.'''
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"""Model name {model_name} not supported.""")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1E-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
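# Example invocation (paths are hypothetical placeholders):
#
#     python convert_groupvit_checkpoint.py \
#         --checkpoint_path /path/to/groupvit_gcc_yfcc.pth \
#         --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#         --model_name groupvit-gcc-yfcc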
| 78
|
'''simple docstring'''
import os
def solution():
    """Return the greatest product of four adjacent numbers (in any direction) in the grid."""
    with open(os.path.dirname(__file__) + """/grid.txt""") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 577
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert('RGB')) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength) -> None:
        if strength < 0 or strength > 1:
            raise ValueError(F"""The value of strength should be in [0.0, 1.0] but is {strength}""")

    def get_timesteps(self, num_inference_steps, strength, device) -> Tuple:
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}""")

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print('add noise to latents at timestep', timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
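# Illustrative usage sketch (checkpoint id and input image are hypothetical; the
# exact loading route for community pipelines may differ):
#
#     from diffusers import UNet2DModel, DDIMScheduler
#     unet = UNet2DModel.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=DDIMScheduler())
#     images, timestep = pipe(image=my_pil_image, strength=0.5, return_dict=False)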
| 471
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )['hidden_states'][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['hidden_states'][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 471
| 1
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """
    Return the number-of-citations string for a Google Scholar lookup.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2_018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 62
|
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """
    Return the sum of the decimal digits of 2 ** power.

    >>> solution(15)
    26
    """
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
| 213
| 0
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = """mps"""
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""")
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""")
    return timestamp
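# Illustrative usage sketch (the model variable is hypothetical):
#
#     device = get_device()
#     freeze_module(my_model)   # my_model: any torch.nn.Module
#     print(get_timestamp())    # e.g. "14:03:59"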
| 713
|
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url, json={"""text""": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            """Request to slack returned an error """
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    '''Configuration for the MGP-STR scene-text-recognition model.'''

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
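# Illustrative usage sketch: the config instantiates with the defaults above.
#
#     config = MgpstrConfig()
#     print(config.model_type, config.hidden_size)  # -> mgp-str 768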
| 433
|
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 433
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4096,
"allenai/longformer-large-4096": 4096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("""!"""), ord("""~""") + 1)) + list(range(ord("""¡"""), ord("""¬""") + 1)) + list(range(ord("""®"""), ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """Constructs a Longformer tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="""utf-8""") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="""utf-8""") as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("""inf""")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = """ """.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(""" """))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        text = """""".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("""utf-8""", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        merge_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])

        with open(vocab_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + """\n""")

        index = 0
        with open(merge_file, """w""", encoding="""utf-8""") as writer:
            writer.write("""#version: 0.2\n""")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""")
                    index = token_index
                writer.write(""" """.join(bpe_tokens) + """\n""")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("""add_prefix_space""", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
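# Illustrative usage sketch (requires network access; the checkpoint name is taken
# from the pretrained map above):
#
#     tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#     print(tokenizer("Hello world")["input_ids"])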
| 709
|
import math
def sieve(n):
    """
    Segmented sieve of Eratosthenes: return all primes <= n.

    >>> sieve(8)
    [2, 3, 5, 7]
    """
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(10**6))
| 588
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Extra arguments on top of `TrainingArguments` used by the seq2seq examples."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."})
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."})
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."})
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"""},)
| 237
|
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """
    ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise.
    """
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 556
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """
    Use Pollard's Rho algorithm to return a nontrivial factor of ``num``, or
    ``None`` if no factor is found within ``attempts`` tries.
    """
    if num < 2:
        raise ValueError("""The input value cannot be less than 2""")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        'num',
        type=int,
        help='The value to find a divisor of',
    )
    parser.add_argument(
        '--attempts',
        type=int,
        default=3,
        help='The number of attempts before giving up',
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(F'{args.num} is probably prime')
    else:
        quotient = args.num // divisor
        print(F'{args.num} = {divisor} * {quotient}')
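# Example run (the factor found can vary with the pseudorandom walk, but 187 = 11 * 17):
#
#     $ python pollard_rho.py 187
#     187 = 11 * 17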
| 709
|
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
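# To render the scene above with the manim community edition (a sketch; the file
# name is illustrative and "Stage1" is the class name used in this file):
#     manim -pql stage_1.py Stage1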
| 9
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ exists for legacy code. When the deprecated args are removed
        completely, it can simply be deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
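# Usage sketch (hedged: `models`, `batch_sizes`, and `sequence_lengths` are fields
# inherited from BenchmarkArguments, and TensorFlow must be installed):
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     print(args.strategy)  # resolves a TPU/GPU/CPU tf.distribute strategy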
| 151
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    pass  # Put your code here...
def random_letters(chars_incl, i):
    pass  # Put your code here...
def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
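# For example (a sketch of the expected behaviour):
#     is_strong_password("Ab1!abcd")  # True: upper, lower, digit, and special char
#     is_strong_password("abcdefgh")  # False: no uppercase, digit, or special char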
def main():
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
| 151
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
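# Note: with this lazy-module idiom, importing the package stays cheap; the heavy
# torch/TF submodules listed in _import_structure are only imported the first time
# one of their names is accessed through the _LazyModule.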
| 322
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def parse_bool(string: str) -> bool:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 322
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 194
| 0
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word) -> set:
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
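# For example, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} (set ordering is arbitrary).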
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 109
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_plbart"] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 109
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
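# Usage sketch (hedged: assumes the public "google/ddpm-cifar10-32" checkpoint, but
# any UNet2DModel checkpoint with a compatible scheduler config should work):
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("ddim_sample.png")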
| 101
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _A ( ):
"""simple docstring"""
parser = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
# positional
parser.add_argument(
    "training_script", type=str, help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args()
def _A ( ):
"""simple docstring"""
args = parse_args()
# Import training_script as a module.
script_fpath = Path(args.training_script)
sys.path.append(str(script_fpath.parent.resolve() ) )
mod_name = script_fpath.stem
mod = importlib.import_module(mod_name)
# Patch sys.argv
sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
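# Example invocation (a sketch; the script path and training arguments are
# illustrative):
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train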
| 61
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        image_processing = LayoutLMv3ImageProcessor()
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ =[["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
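expected_words = UpperCAmelCase_  # capture the words list before the boxes assignment below reuses the same name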
UpperCAmelCase_ =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        expected_boxes = UpperCAmelCase_  # the boxes list assigned just above
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_ocr = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 550
| 0
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
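# To make the transfer mechanics above concrete, here is a toy sketch (not part of
# the original script): copy weights between two architecturally identical stacks
# using the same Tracker/ModuleTransfer machinery. Shapes and layer choices are
# illustrative only.
import torch
from torch import nn

toy_src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).eval()
toy_dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).eval()

ModuleTransfer(src=toy_src, dest=toy_dest, verbose=1)(torch.randn(1, 3, 32, 32))

x = torch.ones(1, 3, 32, 32)
assert torch.allclose(toy_src(x), toy_dest(x))  # outputs match once the weights are copied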
| 650
|
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # assume the input already packs image and question together (dicts, lists, datasets)
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 650
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
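# The net effect of the `_import_structure` indirection above is that heavyweight
# backends (sentencepiece, tokenizers) are only imported when a symbol is first
# accessed. A usage sketch:
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
print(tokenizer("Hello, world!").input_ids)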
| 91
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
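# Quick sanity check of the configuration above (a sketch; defaults follow the
# LiLT base release):
config = LiltConfig()
print(config.model_type)                  # "lilt"
print(config.max_2d_position_embeddings)  # 1024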
| 91
| 1
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
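# For reference, the routine can also be driven non-interactively; a sketch only,
# since the exact strings depend on the float-based binary encoding used by
# decimal_to_binary, and ordering may vary because check() deduplicates via set():
minterms = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0]
binary = decimal_to_binary(3, minterms)
prime_implicants = check(binary)
chart = prime_implicant_chart(prime_implicants, binary)
print(selection(chart, prime_implicants))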
| 691
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if num_final_examples != num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
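# Illustrative usage of the benchmark helper above (a sketch; the path and
# feature spec are examples only):
import datasets

features = datasets.Features(
    {"text": datasets.Value("string"), "label": datasets.Value("int32")}
)
dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
print(dataset)  # a Dataset with 10 random rows matching the feature spec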
| 691
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["MobileViTFeatureExtractor"]
A_ = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
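# As with the NLLB module earlier, consumers simply import the public names and
# the lazy structure resolves them on demand; a sketch using one public checkpoint:
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")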
| 384
|
'''simple docstring'''
A_ = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
A_ = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort) -> list[str]:
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # once all neighbors are visited, add current to sort
    sort.append(current)
    # if not all vertices have been visited, select a new unvisited one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
sort = topological_sort("a", [], [])
print(sort)
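# For comparison, the standard library's graphlib (Python 3.9+) computes the same
# kind of ordering; note that TopologicalSorter maps each node to its
# *predecessors*, so the adjacency list above has to be inverted:
from graphlib import TopologicalSorter

predecessors = {"a": [], "b": ["a"], "c": ["a"], "d": ["b"], "e": ["b"]}
print(list(TopologicalSorter(predecessors).static_order()))  # one valid order, e.g. ['a', 'b', 'c', 'd', 'e']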
| 384
| 1
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
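# In practice this processor is loaded through AutoProcessor; a minimal sketch
# using the public MusicGen small checkpoint:
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
inputs = processor(text=["80s pop track with bassy drums and synth"], padding=True, return_tensors="pt")
print(inputs.keys())  # input_ids and attention_mask for the text branch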
| 79
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
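# Illustrative use of the tool (a sketch; PipelineTool instances are callable,
# and the checkpoints are resolved lazily on the first call):
tool = TextToSpeechTool()
audio = tool("Hello, this text is read out loud.")  # a waveform tensor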
| 227
|
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path, pytorch_dump_folder_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path, lambda storage, loc: storage)
    original = AbsSummarizer(checkpoints, torch.device("cpu"), config)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between the model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between the generator outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
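# Programmatic equivalent of the CLI entry point above (a sketch; the paths are
# placeholders — note that the function saves the state dict to a hard-coded
# directory regardless of the second argument):
convert_bertabs_checkpoints(
    "/path/to/bertabs_original_checkpoint.pt",
    "./bertabs-finetuned-cnndm-extractive-abstractive-summarization",
)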
| 227
| 1
|