code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class snake_case__( UpperCAmelCase__, UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """pixel_values""" SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Optional[int] = TimmBackboneConfig def __init__( self , __lowercase , **__lowercase ) -> Dict: requires_backends(self , '''timm''' ) super().__init__(__lowercase ) lowerCAmelCase_ : List[Any] = config if config.backbone is None: raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' ) if config.backbone not in timm.list_models(): raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" ) if hasattr(__lowercase , '''out_features''' ) and config.out_features is not None: raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' ) lowerCAmelCase_ : Any = getattr(__lowercase , '''use_pretrained_backbone''' , __lowercase ) if pretrained is None: raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' ) # We just take the final layer by default. This matches the default for the transformers models. lowerCAmelCase_ : List[Any] = config.out_indices if getattr(__lowercase , '''out_indices''' , __lowercase ) is not None else (-1,) lowerCAmelCase_ : List[Any] = timm.create_model( config.backbone , pretrained=__lowercase , features_only=config.features_only , in_chans=config.num_channels , out_indices=__lowercase , **__lowercase , ) # These are used to control the output of the model when called. 
If output_hidden_states is True, then # return_layers is modified to include all layers. lowerCAmelCase_ : Optional[int] = self._backbone.return_layers lowerCAmelCase_ : List[str] = {layer['''module''']: str(__lowercase ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(__lowercase ) @classmethod def lowercase_ ( cls , __lowercase , *__lowercase , **__lowercase ) -> List[Any]: requires_backends(cls , ['''vision''', '''timm'''] ) from ...models.timm_backbone import TimmBackboneConfig lowerCAmelCase_ : List[Any] = kwargs.pop('''config''' , TimmBackboneConfig() ) lowerCAmelCase_ : str = kwargs.pop('''use_timm_backbone''' , __lowercase ) if not use_timm: raise ValueError('''use_timm_backbone must be True for timm backbones''' ) lowerCAmelCase_ : int = kwargs.pop('''num_channels''' , config.num_channels ) lowerCAmelCase_ : List[str] = kwargs.pop('''features_only''' , config.features_only ) lowerCAmelCase_ : Union[str, Any] = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone ) lowerCAmelCase_ : Dict = kwargs.pop('''out_indices''' , config.out_indices ) lowerCAmelCase_ : Any = TimmBackboneConfig( backbone=__lowercase , num_channels=__lowercase , features_only=__lowercase , use_pretrained_backbone=__lowercase , out_indices=__lowercase , ) return super()._from_config(__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase ) -> Any: pass def lowercase_ ( self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , **__lowercase ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: lowerCAmelCase_ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase_ : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase_ : Any = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('''Cannot output attentions for timm backbones 
at the moment''' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone lowerCAmelCase_ : Optional[int] = self._all_layers lowerCAmelCase_ : List[str] = self._backbone(__lowercase , **__lowercase ) lowerCAmelCase_ : Optional[Any] = self._return_layers lowerCAmelCase_ : Dict = tuple(hidden_states[i] for i in self.out_indices ) else: lowerCAmelCase_ : Optional[Any] = self._backbone(__lowercase , **__lowercase ) lowerCAmelCase_ : int = None lowerCAmelCase_ : Optional[Any] = tuple(__lowercase ) lowerCAmelCase_ : Tuple = tuple(__lowercase ) if hidden_states is not None else None if not return_dict: lowerCAmelCase_ : List[Any] = (feature_maps,) if output_hidden_states: lowerCAmelCase_ : Any = output + (hidden_states,) return output return BackboneOutput(feature_maps=__lowercase , hidden_states=__lowercase , attentions=__lowercase )
619
import csv import tweepy # Twitter API credentials _UpperCAmelCase : int ="""""" _UpperCAmelCase : Optional[int] ="""""" _UpperCAmelCase : Dict ="""""" _UpperCAmelCase : str ="""""" def lowerCAmelCase ( lowerCAmelCase_ )-> None: # authorize twitter, initialize tweepy lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ ) auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ ) # initialize a list to hold all the tweepy Tweets lowerCAmelCase_ : Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # save the id of the oldest tweet less one lowerCAmelCase_ : str = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowerCAmelCase_ ) > 0: print(f"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates lowerCAmelCase_ : Optional[Any] = api.user_timeline( screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # update the id of the oldest tweet less one lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1 print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(lowerCAmelCase_ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
619
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : str =logging.get_logger(__name__) _UpperCAmelCase : Optional[int] ={ """asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""", # See all SEW models at https://huggingface.co/models?filter=sew } class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = """sew""" def __init__( self , __lowercase=3_2 , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase=2 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowercase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowercase=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowercase=False , __lowercase=1_2_8 , __lowercase=1_6 , __lowercase=True , __lowercase=0.05 , __lowercase=1_0 , __lowercase=2 , __lowercase=0.0 , __lowercase=1_0 , __lowercase=0 , __lowercase="mean" , __lowercase=False , __lowercase=False , __lowercase=2_5_6 , __lowercase=0 , __lowercase=1 , __lowercase=2 , **__lowercase , ) -> int: super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase ) lowerCAmelCase_ : Dict = hidden_size lowerCAmelCase_ : Tuple = feat_extract_norm lowerCAmelCase_ : Tuple = feat_extract_activation lowerCAmelCase_ : List[str] = list(__lowercase ) lowerCAmelCase_ : Optional[int] = list(__lowercase ) lowerCAmelCase_ : Tuple = list(__lowercase ) lowerCAmelCase_ : Optional[int] = conv_bias lowerCAmelCase_ : List[str] = num_conv_pos_embeddings lowerCAmelCase_ : Tuple = num_conv_pos_embedding_groups lowerCAmelCase_ : List[Any] = len(self.conv_dim ) lowerCAmelCase_ : 
List[str] = num_hidden_layers lowerCAmelCase_ : Any = intermediate_size lowerCAmelCase_ : Union[str, Any] = squeeze_factor lowerCAmelCase_ : Optional[int] = hidden_act lowerCAmelCase_ : Any = num_attention_heads lowerCAmelCase_ : Optional[int] = hidden_dropout lowerCAmelCase_ : Optional[Any] = attention_dropout lowerCAmelCase_ : Tuple = activation_dropout lowerCAmelCase_ : List[str] = feat_proj_dropout lowerCAmelCase_ : Any = final_dropout lowerCAmelCase_ : int = layerdrop lowerCAmelCase_ : Tuple = layer_norm_eps lowerCAmelCase_ : Any = initializer_range lowerCAmelCase_ : Union[str, Any] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase_ : str = apply_spec_augment lowerCAmelCase_ : Tuple = mask_time_prob lowerCAmelCase_ : Dict = mask_time_length lowerCAmelCase_ : Optional[int] = mask_time_min_masks lowerCAmelCase_ : Optional[int] = mask_feature_prob lowerCAmelCase_ : Tuple = mask_feature_length lowerCAmelCase_ : Dict = mask_feature_min_masks # ctc loss lowerCAmelCase_ : List[str] = ctc_loss_reduction lowerCAmelCase_ : int = ctc_zero_infinity # sequence classification lowerCAmelCase_ : List[Any] = use_weighted_layer_sum lowerCAmelCase_ : List[Any] = classifier_proj_size @property def lowercase_ ( self ) -> str: return functools.reduce(operator.mul , self.conv_stride , 1 )
619
from math import sqrt def lowerCAmelCase ( lowerCAmelCase_ )-> bool: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( number >= 0 ), "'number' must been an int and positive" lowerCAmelCase_ : str = True # 0 and 1 are none primes. if number <= 1: lowerCAmelCase_ : List[Any] = False for divisor in range(2 , int(round(sqrt(lowerCAmelCase_ ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCAmelCase_ : Any = False break # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'status' must been from type bool" return status def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCAmelCase_ : Optional[Any] = list(range(2 , n + 1 ) ) lowerCAmelCase_ : List[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowerCAmelCase_ ) ): for j in range(i + 1 , len(lowerCAmelCase_ ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCAmelCase_ : Tuple = 0 # filters actual prime numbers. 
lowerCAmelCase_ : List[str] = [x for x in begin_list if x != 0] # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> int: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2" lowerCAmelCase_ : List[str] = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(lowerCAmelCase_ ): ans.append(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and number >= 0, "'number' must been an int and >= 0" lowerCAmelCase_ : Union[str, Any] = [] # this list will be returns of the function. # potential prime number factors. lowerCAmelCase_ : Any = 2 lowerCAmelCase_ : List[str] = number if number == 0 or number == 1: ans.append(lowerCAmelCase_ ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowerCAmelCase_ ): while quotient != 1: if is_prime(lowerCAmelCase_ ) and (quotient % factor == 0): ans.append(lowerCAmelCase_ ) quotient /= factor else: factor += 1 else: ans.append(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase_ : Union[str, Any] = 0 # prime factorization of 'number' lowerCAmelCase_ : Optional[int] = prime_factorization(lowerCAmelCase_ ) lowerCAmelCase_ : Dict = max(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> str: assert 
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase_ : Union[str, Any] = 0 # prime factorization of 'number' lowerCAmelCase_ : int = prime_factorization(lowerCAmelCase_ ) lowerCAmelCase_ : Union[str, Any] = min(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> Dict: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int" assert isinstance(number % 2 == 0 , lowerCAmelCase_ ), "compare bust been from type bool" return number % 2 == 0 def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int" assert isinstance(number % 2 != 0 , lowerCAmelCase_ ), "compare bust been from type bool" return number % 2 != 0 def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]: assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ ) ), "'number' must been an int, even and > 2" lowerCAmelCase_ : Union[str, Any] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowerCAmelCase_ : List[Any] = get_prime_numbers(lowerCAmelCase_ ) lowerCAmelCase_ : Any = len(lowerCAmelCase_ ) # run variable for while-loops. lowerCAmelCase_ : List[Any] = 0 lowerCAmelCase_ : List[Any] = None # exit variable. for break up the loops lowerCAmelCase_ : int = True while i < len_pn and loop: lowerCAmelCase_ : Tuple = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCAmelCase_ : Union[str, Any] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (len(lowerCAmelCase_ ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. 
And sum of elements must been eq 'number'" return ans def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]: assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase_ : List[str] = 0 while numbera != 0: lowerCAmelCase_ : int = numbera % numbera lowerCAmelCase_ : Union[str, Any] = numbera lowerCAmelCase_ : Tuple = rest # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any: assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase_ : Dict = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCAmelCase_ : Tuple = prime_factorization(lowerCAmelCase_ ) lowerCAmelCase_ : str = prime_factorization(lowerCAmelCase_ ) elif numbera == 1 or numbera == 1: lowerCAmelCase_ : List[Any] = [] lowerCAmelCase_ : List[Any] = [] lowerCAmelCase_ : str = max(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : str = 0 lowerCAmelCase_ : List[str] = 0 lowerCAmelCase_ : Any = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCAmelCase_ : str = prime_fac_a.count(lowerCAmelCase_ ) lowerCAmelCase_ : List[str] = prime_fac_a.count(lowerCAmelCase_ ) for _ in range(max(lowerCAmelCase_ , lowerCAmelCase_ ) ): ans *= n else: lowerCAmelCase_ : Dict = prime_fac_a.count(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ): ans *= n done.append(lowerCAmelCase_ ) # iterates through primeFac2 for n in prime_fac_a: if 
n not in done: lowerCAmelCase_ : Optional[int] = prime_fac_a.count(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ): ans *= n done.append(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> Dict: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int" lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : Optional[int] = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowerCAmelCase_ ): ans += 1 # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and is_prime( lowerCAmelCase_ ), "'ans' must been a prime number and from type int" return ans def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict: assert ( is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowerCAmelCase_ : str = p_number_a + 1 # jump to the next number lowerCAmelCase_ : Dict = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowerCAmelCase_ ): number += 1 while number < p_number_a: ans.append(lowerCAmelCase_ ) number += 1 # fetch the next prime number. while not is_prime(lowerCAmelCase_ ): number += 1 # precondition assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ans[0] != p_number_a and ans[len(lowerCAmelCase_ ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 1), "'n' must been int and >= 1" lowerCAmelCase_ : List[str] = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(lowerCAmelCase_ ) # precondition assert ans[0] == 1 and ans[len(lowerCAmelCase_ ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( number > 1 ), "'number' must been an int and >= 1" lowerCAmelCase_ : Tuple = get_divisors(lowerCAmelCase_ ) # precondition assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (divisors[0] == 1) and (divisors[len(lowerCAmelCase_ ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any: assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCAmelCase_ : List[str] = gcd(abs(lowerCAmelCase_ ) , abs(lowerCAmelCase_ ) ) # precondition assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been a int and >= 0" lowerCAmelCase_ : Tuple = 1 # this will be return. 
for factor in range(1 , n + 1 ): ans *= factor return ans def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : Optional[Any] = 1 lowerCAmelCase_ : Tuple = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase_ : Any = ans ans += fiba lowerCAmelCase_ : Dict = tmp return ans
619
1
import math import sys def lowerCAmelCase ( lowerCAmelCase_ )-> int: if number != int(lowerCAmelCase_ ): raise ValueError('''the value of input must be a natural number''' ) if number < 0: raise ValueError('''the value of input must not be a negative number''' ) if number == 0: return 1 lowerCAmelCase_ : str = [-1] * (number + 1) lowerCAmelCase_ : Optional[Any] = 0 for i in range(1 , number + 1 ): lowerCAmelCase_ : List[str] = sys.maxsize lowerCAmelCase_ : Tuple = int(math.sqrt(lowerCAmelCase_ ) ) for j in range(1 , root + 1 ): lowerCAmelCase_ : str = 1 + answers[i - (j**2)] lowerCAmelCase_ : Tuple = min(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Tuple = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
619
from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. _UpperCAmelCase : Tuple =10 def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int: for i in range(lowerCAmelCase_ , lowerCAmelCase_ ): if array[i] == target: return i return -1 def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> int: lowerCAmelCase_ : Any = 0 lowerCAmelCase_ : int = len(lowerCAmelCase_ ) while left <= right: if right - left < precision: return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : List[Any] = (left + right) // 3 + 1 lowerCAmelCase_ : List[str] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: lowerCAmelCase_ : Dict = one_third - 1 elif array[two_third] < target: lowerCAmelCase_ : List[Any] = two_third + 1 else: lowerCAmelCase_ : Union[str, Any] = one_third + 1 lowerCAmelCase_ : Tuple = two_third - 1 else: return -1 def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int: if left < right: if right - left < precision: return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Union[str, Any] = (left + right) // 3 + 1 lowerCAmelCase_ : Optional[int] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(lowerCAmelCase_ , one_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ ) else: return -1 if __name__ == "__main__": 
import doctest doctest.testmod() _UpperCAmelCase : Tuple =input("""Enter numbers separated by comma:\n""").strip() _UpperCAmelCase : Union[str, Any] =[int(item.strip()) for item in user_input.split(""",""")] assert collection == sorted(collection), f"List must be ordered.\n{collection}." _UpperCAmelCase : int =int(input("""Enter the number to be found in the list:\n""").strip()) _UpperCAmelCase : Optional[Any] =ite_ternary_search(collection, target) _UpperCAmelCase : List[str] =rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(f"""Iterative search: {target} found at positions: {resulta}""") print(f"""Recursive search: {target} found at positions: {resulta}""") else: print("""Not found""")
619
1
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _UpperCAmelCase : List[str] =16 _UpperCAmelCase : Union[str, Any] =32 def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]: return int(x / 2**20 ) class snake_case__: '''simple docstring''' def __enter__( self ) -> List[str]: gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowerCAmelCase_ : Optional[int] = torch.cuda.memory_allocated() return self def __exit__( self , *__lowercase ) -> Dict: gc.collect() torch.cuda.empty_cache() lowerCAmelCase_ : Any = torch.cuda.memory_allocated() lowerCAmelCase_ : Union[str, Any] = torch.cuda.max_memory_allocated() lowerCAmelCase_ : Any = bamb(self.end - self.begin ) lowerCAmelCase_ : Tuple = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ = 16 , lowerCAmelCase_ = "bert-base-cased" , lowerCAmelCase_ = 320 , lowerCAmelCase_ = 160 , )-> Dict: lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase_ ) lowerCAmelCase_ : List[str] = load_dataset( '''glue''' , '''mrpc''' , split={'''train''': f"""train[:{n_train}]""", '''validation''': f"""validation[:{n_val}]"""} ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase_ : Tuple = datasets.map( 
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=lowerCAmelCase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ : Dict = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(lowerCAmelCase_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowerCAmelCase_ : List[str] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) lowerCAmelCase_ : Any = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any: # Initialize accelerator lowerCAmelCase_ : Optional[int] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ : List[str] = config['''lr'''] lowerCAmelCase_ : Any = int(config['''num_epochs'''] ) lowerCAmelCase_ : Optional[Any] = int(config['''seed'''] ) lowerCAmelCase_ : int = int(config['''batch_size'''] ) lowerCAmelCase_ : List[Any] = args.model_name_or_path set_seed(lowerCAmelCase_ ) lowerCAmelCase_ , lowerCAmelCase_ : List[str] = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , 
return_dict=lowerCAmelCase_ ) # Instantiate optimizer lowerCAmelCase_ : Any = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase_ : Any = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase_ ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase_ : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: lowerCAmelCase_ : Tuple = 1 lowerCAmelCase_ : List[Any] = (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase_ : str = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase_ , ) else: lowerCAmelCase_ : Tuple = DummyScheduler(lowerCAmelCase_ , total_num_steps=lowerCAmelCase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase_ : int = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase_ : Optional[Any] = 0 # Now we train the model lowerCAmelCase_ : Any = {} for epoch in range(lowerCAmelCase_ , lowerCAmelCase_ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(lowerCAmelCase_ ): lowerCAmelCase_ : List[Any] = model(**lowerCAmelCase_ ) lowerCAmelCase_ : Dict = outputs.loss lowerCAmelCase_ : Optional[Any] = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) ) accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) ) accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) ) accelerator.print( '''Total Peak Memory consumed during the train (max): {}'''.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowerCAmelCase_ : List[Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( )-> 
Dict: lowerCAmelCase_ : List[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=lowerCAmelCase_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowerCAmelCase_ , ) parser.add_argument( '''--output_dir''' , type=lowerCAmelCase_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--peak_memory_upper_bound''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , ) parser.add_argument( '''--n_train''' , type=lowerCAmelCase_ , default=320 , help='''Number of training examples to use.''' , ) parser.add_argument( '''--n_val''' , type=lowerCAmelCase_ , default=160 , help='''Number of validation examples to use.''' , ) parser.add_argument( '''--num_epochs''' , type=lowerCAmelCase_ , default=1 , help='''Number of train epochs.''' , ) lowerCAmelCase_ : Dict = parser.parse_args() lowerCAmelCase_ : Dict = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
619
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure: the config is always importable; tokenizer and model
# entries are added below only when their optional backend is installed.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Slow (sentencepiece-backed) tokenizer.
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast (tokenizers-backed) tokenizer.
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers; mirrors the lazy structure above.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import (
            LlamaForCausalLM,
            LlamaForSequenceClassification,
            LlamaModel,
            LlamaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access. The original obfuscated code
    # assigned the proxy to a throwaway name (and each optional list to a
    # fresh variable instead of extending `_import_structure`), so nothing
    # was actually exported.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Base import structure: the config is always importable; the torch models
# are added only when torch is installed.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers; mirrors the lazy structure above.
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy; the obfuscated original bound it to a throwaway
    # variable (and never populated `_import_structure` with the model list),
    # so the lazy module machinery was inert.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


# Disable TF32 so CPU/GPU numeric comparisons below are reproducible.
torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    """Fast (CPU, tiny-model) tests for the VQ-Diffusion pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        # Size of the VQ codebook used by the dummy models.
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        # NOTE(review): tolerances preserved from the original (2.0 vs 1e-2);
        # the loose bound looks like a deliberate flakiness workaround.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the released microsoft/vq-diffusion-ithq checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
619
1
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """
    Build the initial highway: one row of ``number_of_cells`` cells where
    -1 marks an empty cell and a non-negative value is a car's speed.

    Cars are placed every ``frequency`` cells (or at random intervals when
    ``random_frequency`` is set), each with ``initial_speed`` (or a random
    speed up to ``max_speed`` when ``random_speed`` is set).

    >>> construct_highway(10, 2, 6)
    [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)  # negative speeds make no sense
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """
    Return the number of empty cells in front of the car at ``car_index``,
    wrapping around to the start of the highway (the track is circular).

    >>> get_distance([6, -1, 6, -1, 6], 2)
    1
    """
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway: wrap around from index 0
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """
    Compute every car's next speed using the Nagel-Schreckenberg rules:
    accelerate by one (capped at ``max_speed``), brake to avoid hitting the
    car ahead, then randomly slow down with the given ``probability``.
    Positions are unchanged here; `simulate` moves the cars.
    """
    number_of_cells = len(highway_now)
    next_highway = [-1] * number_of_cells  # Before calculations, the highway is empty
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """
    Run ``number_of_update`` steps of the simulation, appending each new
    highway state to ``highway`` (which is mutated) and returning it.
    """
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # sentencepiece missing: no slow tokenizer to convert from.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast XLNet tokenizer backed by HuggingFace *tokenizers*.
    Pads on the left, as XLNet expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        # XLNet pads token-type ids with segment id 3 (SEG_ID_SEP).
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving the slow vocab requires the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs by appending special tokens. XLNet format:
        single: ``X <sep> <cls>``; pair: ``A <sep> B <sep> <cls>``.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create token-type ids: segment 0 for the first sequence (+ its sep),
        segment 1 for the second, and segment 2 for the trailing cls token.
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Copy the sentencepiece vocab file into ``save_directory`` and return
        its path. Raises if this tokenizer has no slow-vocab information.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Avoid copying the file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
619
1
def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` (sign ignored), iteratively.

    >>> sum_of_digits(12345)
    15
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` (sign ignored), recursively.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` via string conversion.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on a few large inputs and print the results."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Run the call through __main__ so timeit can resolve the function name.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
619
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """
    Simulate a quantum full adder on the Aer simulator.

    Each input is 0, 1, or 2 (2 puts that qubit into a Hadamard
    superposition). Returns the measurement counts of the two output
    qubits (sum and carry-out) over 1000 shots.

    Raises:
        TypeError: if an input is a string rather than a number.
        ValueError: if an input is negative, not an exact integer, or > 2.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
619
1
from __future__ import annotations from math import pi def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]: if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if inductance < 0: raise ValueError('''Inductance cannot be negative''' ) if frequency < 0: raise ValueError('''Frequency cannot be negative''' ) if reactance < 0: raise ValueError('''Inductive reactance cannot be negative''' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
619
import re


def indian_phone_validator(phone: str) -> bool:
    """
    Return True if ``phone`` is a valid Indian mobile number.

    Accepts an optional "+91" prefix (optionally followed by '-' or a
    space), an optional leading 0 and/or "91", then a 10-digit number
    starting with 7, 8, or 9.

    >>> indian_phone_validator("+918827897895")
    True
    >>> indian_phone_validator("123456789")
    False
    """
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    # search with ^...$ anchors is effectively a full match; the extra
    # match.string comparison guards against any partial match.
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
619
1
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """
    Compare array[index1] and array[index2]; swap them in place when they
    violate the requested order (direction 1 = ascending, 0 = descending).
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """
    Merge (in place) a bitonic subsequence of ``length`` elements starting
    at ``low`` into a monotonic run in the given ``direction``.
    ``length`` must be a power of two.
    """
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """
    Sort ``length`` elements of ``array`` starting at ``low`` in place
    (direction 1 = ascending, 0 = descending). The classic bitonic network:
    sort one half ascending and the other descending, then merge.
    ``length`` must be a power of two.
    """
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
619
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    """
    Image processor for ConvNeXT: optional shortest-edge resize (with a
    crop-percentage-based resize-then-center-crop below 384px), rescaling,
    and normalization.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize ``image``. Below 384px the shortest edge is first resized to
        ``shortest_edge / crop_pct`` keeping aspect ratio, then center-cropped;
        at 384px or larger the image is warped to a square with no crop.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with the given per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Apply the configured resize/rescale/normalize pipeline to one image or
        a batch. Per-call arguments override the processor's defaults.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
619
1
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar _UpperCAmelCase : Dict =TypeVar("""T""") def lowerCAmelCase ( lowerCAmelCase_ )-> int: return (position - 1) // 2 def lowerCAmelCase ( lowerCAmelCase_ )-> int: return (2 * position) + 1 def lowerCAmelCase ( lowerCAmelCase_ )-> int: return (2 * position) + 2 class snake_case__( Generic[T] ): '''simple docstring''' def __init__( self ) -> None: lowerCAmelCase_ : list[tuple[T, int]] = [] lowerCAmelCase_ : dict[T, int] = {} lowerCAmelCase_ : int = 0 def __len__( self ) -> int: return self.elements def __repr__( self ) -> str: return str(self.heap ) def lowercase_ ( self ) -> bool: # Check if the priority queue is empty return self.elements == 0 def lowercase_ ( self , __lowercase , __lowercase ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) lowerCAmelCase_ : Any = self.elements self.elements += 1 self._bubble_up(__lowercase ) def lowercase_ ( self ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) lowerCAmelCase_ , lowerCAmelCase_ : Any = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: lowerCAmelCase_ , lowerCAmelCase_ : str = self.heap[0] self._bubble_down(__lowercase ) return elem def lowercase_ ( self , __lowercase , __lowercase ) -> None: # Update the weight of the given key lowerCAmelCase_ : int = self.position_map[elem] lowerCAmelCase_ : List[str] = (elem, weight) if position > 0: lowerCAmelCase_ : List[Any] = get_parent_position(__lowercase ) lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowercase ) else: self._bubble_down(__lowercase ) else: self._bubble_down(__lowercase ) def lowercase_ ( self , __lowercase ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] lowerCAmelCase_ : 
Optional[Any] = self.position_map[elem] if curr_pos == 0: return None lowerCAmelCase_ : List[Any] = get_parent_position(__lowercase ) lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.heap[curr_pos] lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowercase , __lowercase ) return self._bubble_up(__lowercase ) return None def lowercase_ ( self , __lowercase ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] lowerCAmelCase_ : Union[str, Any] = self.position_map[elem] lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.heap[curr_pos] lowerCAmelCase_ : Union[str, Any] = get_child_left_position(__lowercase ) lowerCAmelCase_ : Tuple = get_child_right_position(__lowercase ) if child_left_position < self.elements and child_right_position < self.elements: lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.heap[child_left_position] lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowercase , __lowercase ) return self._bubble_down(__lowercase ) if child_left_position < self.elements: lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowercase , __lowercase ) return self._bubble_down(__lowercase ) else: return None if child_right_position < self.elements: lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowercase , __lowercase ) return self._bubble_down(__lowercase ) return None def lowercase_ ( self , __lowercase , __lowercase ) -> None: # Swap the nodes at the given positions lowerCAmelCase_ : Dict = self.heap[nodea_pos][0] lowerCAmelCase_ : int = self.heap[nodea_pos][0] lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = ( self.heap[nodea_pos], 
self.heap[nodea_pos], ) lowerCAmelCase_ : Tuple = nodea_pos lowerCAmelCase_ : List[Any] = nodea_pos class snake_case__( Generic[T] ): '''simple docstring''' def __init__( self ) -> None: lowerCAmelCase_ : dict[T, dict[T, int]] = {} lowerCAmelCase_ : int = 0 def __repr__( self ) -> str: return str(self.connections ) def __len__( self ) -> int: return self.nodes def lowercase_ ( self , __lowercase ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: lowerCAmelCase_ : List[str] = {} self.nodes += 1 def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowercase ) self.add_node(__lowercase ) lowerCAmelCase_ : Any = weight lowerCAmelCase_ : str = weight def lowerCAmelCase ( lowerCAmelCase_ , )-> tuple[dict[T, int], dict[T, T | None]]: lowerCAmelCase_ : dict[T, int] = {node: maxsize for node in graph.connections} lowerCAmelCase_ : dict[T, T | None] = {node: None for node in graph.connections} lowerCAmelCase_ : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(lowerCAmelCase_ , lowerCAmelCase_ ) if priority_queue.is_empty(): return dist, parent # initialization lowerCAmelCase_ : Optional[int] = priority_queue.extract_min() lowerCAmelCase_ : str = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: lowerCAmelCase_ : List[str] = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCAmelCase_ , dist[neighbour] ) lowerCAmelCase_ : Dict = node # running prim's algorithm while not priority_queue.is_empty(): lowerCAmelCase_ : int = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: lowerCAmelCase_ : int = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCAmelCase_ , dist[neighbour] ) lowerCAmelCase_ : 
Optional[int] = node return dist, parent
619
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Optional[int] =logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] ={ """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""", } class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese""" def __init__( self , __lowercase=3_2_0_0_0 , __lowercase=2_5_6_0 , __lowercase=3_2 , __lowercase=3_2 , __lowercase=4 , __lowercase="gelu" , __lowercase=1.00 , __lowercase=1_0_0_0_0 , __lowercase=2_0_4_8 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=3_1_9_9_6 , __lowercase=3_1_9_9_9 , __lowercase=0.1 , __lowercase=0.0 , **__lowercase , ) -> str: super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) lowerCAmelCase_ : Optional[Any] = vocab_size lowerCAmelCase_ : Tuple = max_position_embeddings lowerCAmelCase_ : Optional[Any] = hidden_size lowerCAmelCase_ : Optional[Any] = num_hidden_layers lowerCAmelCase_ : str = num_attention_heads lowerCAmelCase_ : str = intermediate_multiple_size lowerCAmelCase_ : str = hidden_act lowerCAmelCase_ : Dict = rotary_pct lowerCAmelCase_ : Union[str, Any] = rotary_emb_base lowerCAmelCase_ : int = initializer_range lowerCAmelCase_ : Any = layer_norm_eps lowerCAmelCase_ : Optional[Any] = use_cache lowerCAmelCase_ : Tuple = attention_dropout lowerCAmelCase_ : Dict = hidden_dropout
619
1
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class snake_case__( unittest.TestCase ): '''simple docstring''' def lowercase_ ( self ) -> int: lowerCAmelCase_ : str = 0 @slow def lowercase_ ( self ) -> Optional[Any]: for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): lowerCAmelCase_ : int = AutoTokenizer.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(__lowercase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) self.assertIsInstance(__lowercase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(__lowercase ) , 0 ) def lowercase_ ( self 
) -> List[Any]: lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowercase_ ( self ) -> int: lowerCAmelCase_ : int = AutoTokenizer.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) # Check that tokenizer_type ≠ model_type lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__lowercase , config=__lowercase ) self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowercase_ ( self ) -> int: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowercase , '''vocab.txt''' ) ) lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''bert''' , use_fast=__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowercase , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowercase , '''merges.txt''' ) ) lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''gpt2''' , use_fast=__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) @require_tokenizers def lowercase_ ( self ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowercase , '''vocab.txt''' ) ) lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''bert''' ) self.assertIsInstance(__lowercase , __lowercase ) with 
tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowercase , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowercase , '''merges.txt''' ) ) lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''gpt2''' ) self.assertIsInstance(__lowercase , __lowercase ) def lowercase_ ( self ) -> Dict: with pytest.raises(__lowercase ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowercase_ ( self ) -> int: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: lowerCAmelCase_ : List[Any] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) ) if isinstance(__lowercase , __lowercase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowercase ) else: self.assertEqual(tokenizer.do_lower_case , __lowercase ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def lowercase_ ( self ) -> Optional[int]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( __lowercase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): lowerCAmelCase_ : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowercase_ ( self ) -> List[Any]: # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. 
models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai lowerCAmelCase_ : Optional[int] = TOKENIZER_MAPPING.values() lowerCAmelCase_ : List[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(__lowercase ) @require_tokenizers def lowercase_ ( self ) -> Dict: self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowercase ) , __lowercase ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __lowercase ) @require_tokenizers def lowercase_ ( self ) -> int: lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowercase ) lowerCAmelCase_ : Optional[Any] = '''Hello, world. How are you?''' lowerCAmelCase_ : List[str] = tokenizer.tokenize(__lowercase ) self.assertEqual('''[UNK]''' , tokens[0] ) lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowercase ) lowerCAmelCase_ : List[str] = tokenizer.tokenize(__lowercase ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowercase_ ( self ) -> Any: lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(__lowercase ) , __lowercase ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowercase_ ( self ) -> Tuple: lowerCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) ) with 
tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowercase ) lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : int = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(__lowercase , __lowercase ) def lowercase_ ( self ) -> Optional[Any]: # Check we can load the tokenizer config of an online model. lowerCAmelCase_ : Union[str, Any] = get_tokenizer_config('''bert-base-cased''' ) lowerCAmelCase_ : Union[str, Any] = config.pop('''_commit_hash''' , __lowercase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(__lowercase , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. lowerCAmelCase_ : List[str] = get_tokenizer_config(__lowercase ) self.assertDictEqual(__lowercase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowercase ) lowerCAmelCase_ : List[str] = get_tokenizer_config(__lowercase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowercase_ ( self ) -> Union[str, Any]: try: AutoConfig.register('''custom''' , __lowercase ) AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowercase ): AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase ) lowerCAmelCase_ : Optional[Any] = CustomTokenizer.from_pretrained(__lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowercase ) lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowercase_ ( self ) -> List[str]: try: AutoConfig.register('''custom''' , __lowercase ) # Can register in two steps AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( __lowercase , slow_tokenizer_class=__lowercase , fast_tokenizer_class=__lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowercase ): AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a 
tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ : Union[str, Any] = BertTokenizerFast.from_pretrained(__lowercase ) bert_tokenizer.save_pretrained(__lowercase ) lowerCAmelCase_ : List[Any] = CustomTokenizerFast.from_pretrained(__lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowercase ) lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self ) -> int: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowercase ): lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowercase ): lowerCAmelCase_ : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase ) lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowercase ) lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase_ : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase , use_fast=__lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowercase ) lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase , use_fast=__lowercase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowercase_ ( self ) -> Tuple: class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = False class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = NewTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = False try: AutoConfig.register('''custom''' , __lowercase ) AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase ) AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase ) # If remote code is not set, the default is to use local lowerCAmelCase_ : List[Any] = 
AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowercase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. lowerCAmelCase_ : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase , use_fast=__lowercase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase , use_fast=__lowercase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : str = AutoTokenizer.from_pretrained( 
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowercase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowercase , use_fast=__lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowercase_ ( self ) -> int: with self.assertRaisesRegex( __lowercase , '''bert-base is not a local folder and is not a valid model identifier''' ): lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''bert-base''' ) def lowercase_ ( self ) -> Optional[int]: with self.assertRaisesRegex( __lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase , revision='''aaaaaa''' ) def lowercase_ ( self ) -> str: # Make sure we have cached the tokenizer. lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
619
# NOTE(review): this file appears machine-obfuscated. Every local is bound to the
# single name `lowerCAmelCase_` while subsequent statements read the ORIGINAL
# variable names (e.g. `config`, `model`, `outputs`), and several `def`s declare
# the parameter name `__lowercase` more than once (a SyntaxError). The code is
# preserved byte-for-byte below and cannot run as written; comments flag the
# breakage for restoration against the upstream MaskFormer test suite.
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

    if is_vision_available():
        from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


# Model-tester helper: builds tiny MaskFormer configs/inputs for the common tests.
class snake_case__:
    '''simple docstring'''

    # NOTE(review): duplicate `__lowercase` parameters below are a SyntaxError;
    # originally these were parent/batch_size/is_training/... as the body shows.
    def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
        lowerCAmelCase_ : str = parent
        lowerCAmelCase_ : Optional[Any] = batch_size
        lowerCAmelCase_ : List[Any] = is_training
        lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
        lowerCAmelCase_ : List[Any] = num_queries
        lowerCAmelCase_ : str = num_channels
        lowerCAmelCase_ : Dict = min_size
        lowerCAmelCase_ : List[str] = max_size
        lowerCAmelCase_ : Any = num_labels
        lowerCAmelCase_ : str = mask_feature_size

    # Builds random pixel_values / pixel_mask / mask_labels / class_labels plus a config.
    def lowercase_( self ) -> List[Any]:
        lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowercase )
        lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
        ).float()
        lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
        lowerCAmelCase_ : Dict = self.get_config()
        # NOTE(review): the names returned here were never bound above (obfuscation damage).
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    # Tiny Swin-backbone + DETR-decoder MaskFormer config sized by the tester fields.
    def lowercase_( self ) -> List[str]:
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def lowercase_( self ) -> Union[str, Any]:
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    # Asserts the three hidden-state tuples have the expected lengths per config.
    def lowercase_( self , __lowercase , __lowercase ) -> Any:
        lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
        lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )

    def lowercase_( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowercase , __lowercase )

    def lowercase_( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
        lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
        model.to(__lowercase )
        model.eval()

        def comm_check_on_output(__lowercase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Any = model(__lowercase )
        comm_check_on_output(__lowercase )
        lowerCAmelCase_ : List[Any] = model(
            pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        comm_check_on_output(__lowercase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


# Common test-suite class (originally MaskFormerModelTest); bases were obfuscated
# to the undefined name `UpperCAmelCase__` — presumably ModelTesterMixin and
# PipelineTesterMixin, TODO confirm against upstream.
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Tuple = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : List[str] = False

    # NOTE(review): `MaskFormerModelTester` is not defined in this file — the tester
    # class above lost its name to the obfuscation.
    def lowercase_( self ) -> List[Any]:
        lowerCAmelCase_ : Any = MaskFormerModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )

    def lowercase_( self ) -> Any:
        self.config_tester.run_common_tests()

    def lowercase_( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def lowercase_( self ) -> str:
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def lowercase_( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def lowercase_( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def lowercase_( self ) -> Union[str, Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_( self ) -> Optional[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_( self ) -> Dict:
        pass

    # Checks the first forward argument is `pixel_values` for every model class.
    def lowercase_( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Tuple = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : str = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )

    @slow
    def lowercase_( self ) -> Optional[int]:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )

    def lowercase_( self ) -> List[Any]:
        lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ : List[Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
        }
        lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )

    def lowercase_( self ) -> Dict:
        lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_( self ) -> int:
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
            lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )

    def lowercase_( self ) -> List[str]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : int = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()

    # Gradient-retention test: every intermediate activation must receive a grad.
    def lowercase_( self ) -> Optional[int]:
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : Any = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Any = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


# Absolute tolerance for the integration-test tensor comparisons below.
_UpperCAmelCase : Dict = 1E-4


def lowerCAmelCase( )-> Any:
    # Loads the standard COCO "two cats" test fixture.
    # NOTE(review): the value is bound to `lowerCAmelCase_` but `image` is returned.
    lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


# Slow integration tests against released facebook/maskformer checkpoints.
@require_vision
@slow
class snake_case__( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def lowercase_( self ) -> Union[str, Any]:
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )

    def lowercase_( self ) -> Any:
        lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
        lowerCAmelCase_ : Dict = self.default_image_processor
        lowerCAmelCase_ : int = prepare_img()
        lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**__lowercase )
        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : List[Any] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : int = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_( self ) -> Dict:
        lowerCAmelCase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : Tuple = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Tuple = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_( self ) -> Optional[Any]:
        lowerCAmelCase_ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : int = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : int = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_( self ) -> Optional[Any]:
        lowerCAmelCase_ : Dict = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : str = self.default_image_processor
        lowerCAmelCase_ : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
        lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
        lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
619
1
# NOTE(review): machine-obfuscated manim scene (looks like the accelerate
# big-model-inference animation — TODO confirm). Every local is bound to
# `lowerCAmelCase_` while later statements read the ORIGINAL names (`cpu`,
# `gpu`, `model_arr`, `cpu_left_col_base`, ...) and many call sites pass the
# undefined name `__lowercase`; the scene cannot render as written. Code is
# preserved byte-for-byte; comments mark the intended stages.
from manim import *


class snake_case__( UpperCAmelCase__ ):
    '''simple docstring'''

    def lowercase_( self ) -> Optional[int]:
        # Building blocks: a memory cell, its filled overlay, and a smaller "meta" cell.
        lowerCAmelCase_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        lowerCAmelCase_ : Optional[int] = Rectangle(height=0.25 , width=0.25 )
        # CPU: two columns of six memory cells plus a label.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : str = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Optional[Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Optional[Any] = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # GPU: a single row of four memory cells plus a label.
        lowerCAmelCase_ : str = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : str = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : Tuple = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # Model: a row of six cells representing the model's layers.
        lowerCAmelCase_ : Dict = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Optional[Any] = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : Any = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        # Fill overlays: one per model layer, mirrored onto the CPU column.
        lowerCAmelCase_ : List[Any] = []
        lowerCAmelCase_ : str = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : List[str] = fill.copy().set_fill(__lowercase , opacity=0.8 )
            target.move_to(__lowercase )
            model_arr.append(__lowercase )
            lowerCAmelCase_ : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Disk: two columns of six meta cells plus a label.
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Any = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Dict = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4, -1.25, 0] )
        self.add(__lowercase , __lowercase )
        # Legend (key) in the top-left corner.
        lowerCAmelCase_ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : Tuple = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : int = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Stage 1: narration, then an input square appears left of the model.
        lowerCAmelCase_ : Optional[Any] = MarkupText(
            f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase ) )
        lowerCAmelCase_ : Dict = Square(0.3 )
        input.set_fill(__lowercase , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , __lowercase , buff=0.5 )
        self.play(Write(__lowercase ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=__lowercase , buff=0.02 )
        self.play(MoveToTarget(__lowercase ) )
        self.play(FadeOut(__lowercase ) )
        # Stage 2: arrow marks the active layer; first weight hops CPU -> GPU.
        lowerCAmelCase_ : Optional[int] = Arrow(start=__lowercase , end=__lowercase , color=__lowercase , buff=0.5 )
        a.next_to(model_arr[0].get_left() , __lowercase , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        lowerCAmelCase_ : List[str] = MarkupText(
            f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        lowerCAmelCase_ : Union[str, Any] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
        self.play(
            Write(__lowercase ) ,
            Circumscribe(model_arr[0] , color=__lowercase , **__lowercase ) ,
            Circumscribe(model_cpu_arr[0] , color=__lowercase , **__lowercase ) ,
            Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        # Stage 3: walk the input across all six layers, shuttling weights.
        lowerCAmelCase_ : str = a.copy()
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , __lowercase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            lowerCAmelCase_ : Tuple = AnimationGroup(
                FadeOut(__lowercase , run_time=0.5 ) , MoveToTarget(__lowercase , run_time=0.5 ) , FadeIn(__lowercase , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(__lowercase )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                # Prefetch the next layer's weights onto the GPU.
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    lowerCAmelCase_ : Dict = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **__lowercase ) ,
                    Circumscribe(cpu_left_col_base[i] , **__lowercase ) ,
                    Circumscribe(cpu_left_col_base[i + 1] , color=__lowercase , **__lowercase ) ,
                    Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) ,
                    Circumscribe(model_arr[i + 1] , color=__lowercase , **__lowercase ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # Last layer: weights return to the CPU and the input exits right.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=__lowercase , **__lowercase ) ,
                    Circumscribe(cpu_left_col_base[-1] , color=__lowercase , **__lowercase ) ,
                    Circumscribe(gpu_rect[0] , color=__lowercase , **__lowercase ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
            lowerCAmelCase_ : Dict = a_c
            lowerCAmelCase_ : int = a_c.copy()
        # Stage 4: closing narration.
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(__lowercase ) , FadeOut(__lowercase , run_time=0.5 ) , )
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) , MoveToTarget(__lowercase ) )
        self.wait()
619
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)  # fix: decorator argument was the undefined name `UpperCAmelCase__`
class snake_case__(TaskTemplate):  # fix: base was the undefined name `UpperCAmelCase__`; TaskTemplate is imported above
    """Task template for automatic speech recognition datasets.

    Maps a dataset's audio column to the task input and its transcription
    column to the task label.

    Field names restored so the methods below work: the obfuscated version
    named every field ``SCREAMING_SNAKE_CASE__`` (so only the last survived)
    while the methods read ``self.audio_column`` / ``self.input_schema`` /
    ``self.transcription_column``.
    """

    # Task identifier; kept in asdict output even when it equals the default.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    # Schemas are class-level (shared) rather than per-instance dataclass fields.
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def lowercase_(self, __lowercase) -> TaskTemplate:
        """Return a copy of this template whose input schema uses the dataset's
        actual Audio feature.

        Args:
            __lowercase: the dataset's ``Features`` mapping (param name kept
                from the obfuscated original to preserve the call interface).

        Raises:
            ValueError: if the audio column is missing or not an Audio type.
        """
        if self.audio_column not in __lowercase:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""")
        if not isinstance(__lowercase[self.audio_column], Audio):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""")
        # fix: the obfuscated body bound every result to one throwaway name and
        # then returned the undefined `task_template` — restore the real flow.
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = __lowercase[self.audio_column]
        # Write through __dict__ because the dataclass is frozen.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    # NOTE(review): this property shares the (obfuscated) name `lowercase_` with
    # the method above and therefore shadows it at class-creation time; the
    # original names were presumably `align_with_features` / `column_mapping` —
    # confirm before renaming, since renaming changes the public interface.
    @property
    def lowercase_(self) -> Dict[str, str]:
        """Map dataset column names to the task's canonical input/label names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
1
from math import ceil


def lowerCAmelCase(lowerCAmelCase_=1_001) -> int:
    """Return the sum of the numbers on both diagonals of an n x n number
    spiral (Project Euler 28), where ``lowerCAmelCase_`` is the odd side
    length ``n`` (default 1001).
    """
    total = 1  # the 1 at the centre of the spiral
    for i in range(1, int(ceil(lowerCAmelCase_ / 2.0))):
        # Ring i has corners odd**2, odd**2 - even, odd**2 - 2*even, odd**2 - 3*even,
        # which sum to 4*odd**2 - 6*even.
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        # fix: the guard previously called the undefined name `solution`
        print(lowerCAmelCase())
    else:
        try:
            # fix: the parsed argument was bound to `_UpperCAmelCase` but the
            # call used the undefined name `n`
            n = int(sys.argv[1])
            print(lowerCAmelCase(n))
        except ValueError:
            print("""Invalid entry - please enter a number""")
619
# NOTE(review): machine-obfuscated module of pipeline-parameter sets (looks like
# diffusers' `pipeline_params.py` — TODO confirm). Every constant was renamed to
# the SAME identifier `_UpperCAmelCase`, so each assignment overwrites the
# previous one and only the final frozenset survives at import time. The
# distinct original names must be restored before this module is usable; the
# values themselves are preserved byte-for-byte below.

# Text-to-image call parameters / required (negative-prompt) subset.
_UpperCAmelCase : int = frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : List[Any] = frozenset(["""prompt""", """negative_prompt"""])
# Unconditional generation takes no call parameters.
_UpperCAmelCase : Dict = frozenset([])
_UpperCAmelCase : int = frozenset(["""image"""])
# Image-variation parameters.
_UpperCAmelCase : Tuple = frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int = frozenset(["""image"""])
# Text-guided image-to-image parameters.
_UpperCAmelCase : str = frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int = frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] = frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] = frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] = frozenset(["""image""", """mask_image"""])
# Example-guided inpainting parameters.
_UpperCAmelCase : Union[str, Any] = frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple = frozenset(["""example_image""", """image""", """mask_image"""])
# Class-conditioned / unconditional batch parameters.
_UpperCAmelCase : Any = frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] = frozenset(["""class_labels"""])
_UpperCAmelCase : int = frozenset(["""batch_size"""])
_UpperCAmelCase : str = frozenset([])
_UpperCAmelCase : str = frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] = frozenset([])
# Text-to-audio parameters.
_UpperCAmelCase : Tuple = frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple = frozenset(["""prompt""", """negative_prompt"""])
# Token-conditioned generation parameters.
_UpperCAmelCase : List[str] = frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] = frozenset(["""input_tokens"""])
619
1
# limitations under the License.
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class snake_case__(DiffusionPipeline):  # fix: base was the undefined name `UpperCAmelCase__`
    """Minimal unconditional-image DiffusionPipeline used as a local-code test
    fixture (its outputs carry the extra marker string "This is a local test").
    """

    # fix: the obfuscated signature declared `__lowercase` twice (SyntaxError);
    # names restored from how the modules are registered below.
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        num_inference_steps=5_0,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the full denoising loop from Gaussian noise.

        Returns either an ImagePipelineOutput or a bare image tuple, each paired
        with the fixture's marker string.
        """
        # fix: the obfuscated body bound every value to one throwaway name and
        # then read the undefined names `image` / `model_output`; restored.
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        # Map from [-1, 1] model space to [0, 1] image space, NCHW -> NHWC.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
619
def lowerCAmelCase(lowerCAmelCase_=1_000_000) -> int:
    """Return the starting number below ``lowerCAmelCase_`` that produces the
    longest Collatz chain (Project Euler 14).

    Chain lengths are memoized in ``counters`` so each number's tail is only
    walked once.
    """
    # fix: the obfuscated version bound the limit, the best-so-far number, the
    # best length AND the memo dict all to one name, so `range(2, <dict>)`
    # raised TypeError and `counters` was undefined; distinct names restored.
    largest_number = 1
    pre_counter = 1  # longest chain length seen so far
    counters = {1: 1}  # memo: start value -> chain length
    for start in range(2, lowerCAmelCase_):
        counter = 0
        number = start
        while True:
            if number in counters:
                # Reached a number whose chain length is already known.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    # fix: previously called the undefined name `solution`
    print(lowerCAmelCase(int(input().strip())))
619
1
class snake_case__: '''simple docstring''' def __init__( self , __lowercase ) -> None: lowerCAmelCase_ : Optional[Any] = set_counts lowerCAmelCase_ : int = max(__lowercase ) lowerCAmelCase_ : List[Any] = len(__lowercase ) lowerCAmelCase_ : List[str] = [1] * num_sets lowerCAmelCase_ : Dict = list(range(__lowercase ) ) def lowercase_ ( self , __lowercase , __lowercase ) -> bool: lowerCAmelCase_ : Union[str, Any] = self.get_parent(__lowercase ) lowerCAmelCase_ : int = self.get_parent(__lowercase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowerCAmelCase_ : int = 0 lowerCAmelCase_ : Any = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowerCAmelCase_ : int = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : Optional[Any] = src_parent lowerCAmelCase_ : Dict = self.set_counts[src_parent] lowerCAmelCase_ : Dict = max(self.max_set , __lowercase ) return True def lowercase_ ( self , __lowercase ) -> int: if self.parents[disj_set] == disj_set: return disj_set lowerCAmelCase_ : Any = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
619
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class snake_case__(PretrainedConfig):
    """Composite configuration holding an encoder config and a decoder config.

    The two sub-configs are serialized as plain dicts and reconstructed via
    ``AutoConfig.for_model`` from their stored ``model_type``.
    """

    model_type = "encoder-decoder"
    # Composite configs are not tied to a single architecture.
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Explicit check instead of `assert` so it survives `python -O`.
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError("Config has to be initialized with encoder and decoder config")
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Local import avoids a circular dependency with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        """Build a composite config from two existing configs.

        Side effect: marks the decoder config as a decoder with cross-attention.
        """
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self) -> dict:
        """Serialize to a dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
619
1
# Auto-generated "dummy" stand-ins: each class raises a helpful backend error
# (via requires_backends) as soon as it is constructed or one of its factory
# classmethods is called, instead of failing later with an opaque ImportError
# when the optional `flax` dependency is missing.
#
# NOTE(review): every class below shares the obfuscated name `snake_case__`,
# so each definition shadows the previous one — presumably these originally
# carried distinct Flax class names; verify against the un-minified file.
from ..utils import DummyObject, requires_backends


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    # Backends this dummy stands in for.
    SCREAMING_SNAKE_CASE__ : int = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> Optional[int]:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Tuple:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> str:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Any = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> List[str]:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Tuple:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> int:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Any = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> Optional[int]:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> List[str]:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Union[str, Any]:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Dict = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> List[str]:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> List[str]:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Any:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> Tuple:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Dict:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> List[str]:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : str = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> Union[str, Any]:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Optional[int]:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Optional[Any]:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> Union[str, Any]:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Optional[int]:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Dict:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Any = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> str:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> int:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Optional[Any]:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : List[Any] = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> str:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> List[Any]:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Optional[int]:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : int = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> Dict:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Union[str, Any]:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Any:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> Dict:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> int:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> List[Any]:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> List[str]:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> Tuple:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> List[str]:
        requires_backends(cls , ['''flax'''] )


class snake_case__( metaclass=UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Any = ["""flax"""]

    def __init__( self , *__lowercase , **__lowercase ) -> Tuple:
        requires_backends(self , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> int:
        requires_backends(cls , ['''flax'''] )

    @classmethod
    def lowercase_ ( cls , *__lowercase , **__lowercase ) -> List[Any]:
        requires_backends(cls , ['''flax'''] )
619
from __future__ import annotations from math import pi def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]: if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if inductance < 0: raise ValueError('''Inductance cannot be negative''' ) if frequency < 0: raise ValueError('''Frequency cannot be negative''' ) if reactance < 0: raise ValueError('''Inductive reactance cannot be negative''' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
619
1
# Frozensets of accepted call-argument names for the diffusers pipeline test
# suites — each pair is (full parameter set, batch/required parameter set) for
# one pipeline family (text-to-image, image variation, inpainting, ...).
#
# NOTE(review): the original distinct constant names (presumably
# TEXT_TO_IMAGE_PARAMS, IMAGE_VARIATION_PARAMS, etc.) were mangled to a single
# name `_UpperCAmelCase`, so each assignment shadows the previous one and only
# the last binding survives — verify against the un-minified file.

# presumably: text-guided image generation
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
# presumably: unconditional image generation (no call-time parameters)
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
# presumably: image variation
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
# presumably: text-guided image-to-image
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
# presumably: example-guided inpainting (e.g. PaintByExample)
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# presumably: class-conditioned generation
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
# presumably: unconditional generation keyed only on batch size
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# presumably: text-to-audio generation
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
# presumably: token-conditioned audio generation
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
619
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

try:
    from .utils import logging

    logger = logging.get_logger(__name__)
except ImportError:  # standalone use outside the package
    import logging as _stdlib_logging

    logger = _stdlib_logging.getLogger(__name__)


class SchedulerType(Enum):
    """Names of the supported learning-rate schedules."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Schedule with a constant learning rate (multiplier always 1)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup over ``num_warmup_steps``."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise-constant multiplier from rules like ``"10:0.5,20:0.1,0.01"``.

    Each ``"N:v"`` entry applies multiplier ``v`` while the step is below ``N``;
    the final bare value is the multiplier after the last threshold.
    """
    rules_dict = {}
    rule_list = step_rules.split(''',''')
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(''':''')
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        # Closure factory keeps the rule table bound at creation time.
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    """Linear warmup followed by a linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1
) -> LambdaLR:
    """Linear warmup followed by a cosine decay (``num_cycles`` half-waves)."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
) -> LambdaLR:
    """Linear warmup followed by cosine decay with ``num_cycles`` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
) -> LambdaLR:
    """Linear warmup followed by a polynomial decay from the optimizer's initial
    lr down to ``lr_end``.

    Raises:
        ValueError: if ``lr_end`` is not smaller than the optimizer's initial lr.
    """
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build any schedule in ``SchedulerType`` by name.

    Args:
        name: schedule name or ``SchedulerType`` member.
        optimizer: optimizer whose learning rate will be scheduled.
        step_rules: rule string (piecewise_constant only).
        num_warmup_steps: warmup steps (required by every warmup schedule).
        num_training_steps: total training steps (required by decaying schedules).
        num_cycles: cycle count for cosine_with_restarts.
        power: exponent for the polynomial schedule.
        last_epoch: index of the last epoch when resuming.

    Raises:
        ValueError: when a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
619
1
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


# NOTE(review): identifiers in this test module were machine-mangled (classes
# to `snake_case__`, methods to `lowercase_`, locals to `lowerCAmelCase_`).
# Several references below (e.g. `TimmBackboneModelTester`, `out_indices`,
# `result`, `config_and_inputs`, duplicate `__lowercase` parameters) point at
# the original pre-mangling names and no longer resolve. Code is kept
# byte-identical; only comments were added.
class snake_case__:
    '''simple docstring'''

    # Test helper that builds a small TimmBackbone config plus matching inputs.
    def __init__( self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="resnet50" , __lowercase=3 , __lowercase=3_2 , __lowercase=3 , __lowercase=True , __lowercase=True , ) -> List[Any]:
        lowerCAmelCase_ : int = parent
        lowerCAmelCase_ : Union[str, Any] = out_indices if out_indices is not None else [4]
        lowerCAmelCase_ : Optional[Any] = stage_names
        lowerCAmelCase_ : int = out_features
        lowerCAmelCase_ : Union[str, Any] = backbone
        lowerCAmelCase_ : List[str] = batch_size
        lowerCAmelCase_ : Any = image_size
        lowerCAmelCase_ : Any = num_channels
        lowerCAmelCase_ : List[Any] = use_pretrained_backbone
        lowerCAmelCase_ : int = is_training

    # Builds a (config, pixel_values) pair for a forward pass.
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase_ : List[str] = self.get_config()
        return config, pixel_values

    def lowercase_ ( self ) -> str:
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )

    # Runs the backbone and checks the final feature-map shape.
    def lowercase_ ( self , __lowercase , __lowercase ) -> Union[str, Any]:
        lowerCAmelCase_ : Union[str, Any] = TimmBackbone(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(__lowercase )
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )

    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Dict = self.prepare_config_and_inputs()
        lowerCAmelCase_ , lowerCAmelCase_ : int = config_and_inputs
        lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Optional[Any] = (TimmBackbone,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Optional[Any] = False
    SCREAMING_SNAKE_CASE__ : str = False

    # setUp: instantiate the model tester and config tester.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Optional[Any] = TimmBackboneModelTester(self )
        lowerCAmelCase_ : Dict = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )

    # Runs the common config round-trip checks.
    def lowercase_ ( self ) -> Tuple:
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    # Compares a timm-loaded backbone with the transformers-native equivalent.
    def lowercase_ ( self ) -> Optional[int]:
        lowerCAmelCase_ : Dict = '''resnet18'''
        lowerCAmelCase_ : Dict = '''microsoft/resnet-18'''
        lowerCAmelCase_ : Union[str, Any] = AutoBackbone.from_pretrained(__lowercase , use_timm_backbone=__lowercase )
        lowerCAmelCase_ : Any = AutoBackbone.from_pretrained(__lowercase )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        lowerCAmelCase_ : Optional[int] = AutoBackbone.from_pretrained(__lowercase , use_timm_backbone=__lowercase , out_indices=[1, 2, 3] )
        lowerCAmelCase_ : List[Any] = AutoBackbone.from_pretrained(__lowercase , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )

    @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
    def lowercase_ ( self ) -> Optional[int]:
        pass

    @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
    def lowercase_ ( self ) -> Tuple:
        pass

    @unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass

    @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass

    @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
    def lowercase_ ( self ) -> Any:
        pass

    @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
    def lowercase_ ( self ) -> List[Any]:
        pass

    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def lowercase_ ( self ) -> Optional[int]:
        pass

    @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
    def lowercase_ ( self ) -> str:
        pass

    @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
    def lowercase_ ( self ) -> List[str]:
        pass

    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def lowercase_ ( self ) -> Tuple:
        pass

    @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
    def lowercase_ ( self ) -> List[Any]:
        pass

    @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
    def lowercase_ ( self ) -> List[str]:
        pass

    @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
    def lowercase_ ( self ) -> int:
        pass

    @unittest.skip('''Safetensors is not supported by timm.''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self ) -> int:
        pass

    # Checks the forward signature starts with `pixel_values`.
    def lowercase_ ( self ) -> Any:
        lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Dict = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : List[Any] = [*signature.parameters.keys()]
            lowerCAmelCase_ : Optional[int] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )

    # Verifies gradients flow back to hidden states (and attentions if present).
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase_ : int = True
        lowerCAmelCase_ : Dict = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        lowerCAmelCase_ : Tuple = self.all_model_classes[0]
        lowerCAmelCase_ : Dict = model_class(__lowercase )
        model.to(__lowercase )
        lowerCAmelCase_ : Optional[int] = self._prepare_for_class(__lowercase , __lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        lowerCAmelCase_ : str = outputs[0][-1]
        # Encoder-/Decoder-only models
        lowerCAmelCase_ : List[str] = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            lowerCAmelCase_ : Dict = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=__lowercase )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )

    # Checks feature maps/channels match out_indices, plus the defaults and
    # fresh-weight initialization paths.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : int = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Tuple = model(**__lowercase )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            lowerCAmelCase_ : int = copy.deepcopy(__lowercase )
            lowerCAmelCase_ : Any = None
            lowerCAmelCase_ : Union[str, Any] = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Dict = model(**__lowercase )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            lowerCAmelCase_ : int = copy.deepcopy(__lowercase )
            lowerCAmelCase_ : Dict = False
            lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Union[str, Any] = model(**__lowercase )
619
from __future__ import annotations


def lowerCAmelCase(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Solve the semiconductor mass-action law ``n_i**2 = n * p`` for the one
    quantity supplied as 0.

    Args:
        electron_conc: electron concentration n (0 if unknown).
        hole_conc: hole concentration p (0 if unknown).
        intrinsic_conc: intrinsic carrier concentration n_i (0 if unknown).

    Returns:
        ``(name, value)`` for the computed quantity.

    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative in a semiconductor''')
    elif hole_conc < 0:
        raise ValueError('''Hole concentration cannot be negative in a semiconductor''')
    elif intrinsic_conc < 0:
        raise ValueError(
            '''Intrinsic concentration cannot be negative in a semiconductor'''
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable: the count(0) check guarantees exactly one zero above.
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
1
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Build a beta schedule from a cumulative-alpha ("alpha bar") function.

    The class body below calls ``betas_for_alpha_bar``; the obfuscated source
    had renamed this helper (NameError) and collapsed the two loop variables
    ``t1``/``t2`` into one name, which made every beta 0. Restored here.

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: upper cap applied to each beta.
        alpha_transform_type: "cosine" or "exp" alpha_bar shape.

    Returns:
        1-D ``torch.float32`` tensor of betas.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class snake_case__(SchedulerMixin, ConfigMixin):
    """Heun second-order discrete scheduler (Karras et al. 2022, Algorithm 1 style).

    The obfuscated source declared every ``__init__`` parameter with the same
    name (a SyntaxError) and every method as ``lowercase_`` (each clobbering
    the previous) while internal call sites used the real names
    (``set_timesteps``, ``index_for_timestep``, ...). Names restored to match
    the call sites; logic is taken verbatim from the visible bodies.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2  # second-order method: two model evaluations per output step

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        """Return the sigma index corresponding to ``timestep``."""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        """Standard deviation of the initial noise distribution."""
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep) -> torch.FloatTensor:
        """Scale the input sample by 1/sqrt(sigma^2 + 1) for the model call."""
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        """Precompute sigmas/timesteps for ``num_inference_steps`` denoising steps."""
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of
        # Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # Interleave sigmas so each denoising step covers two model calls (Heun).
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        """Invert the sigma schedule: map a sigma back to a (fractional) timestep."""
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps) -> torch.FloatTensor:
        """Construct the Karras et al. (2022) noise schedule from the sigma range."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        """True while waiting to take the first (Euler) half of a Heun step."""
        return self.dt is None

    def step(self, model_output, timestep, sample, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        """Advance the sample one (half-)step; alternates Euler and Heun corrector."""
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method: average the two slope estimates
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        """Diffuse ``original_samples`` to the noise level of ``timesteps``."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
619
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempted from the checkpoint-in-docstring requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name in ``config_class``'s source whose docstring
    link matches the name, or ``None`` if there is no valid checkpoint link."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ``ValueError`` listing every (non-exempt, non-deprecated) config class
    whose docstring does not contain a valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
619
1
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    """Split a scikit-learn Bunch-like mapping into ``(features, target)``."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor on ``(features, target)`` and predict ``test_features``.

    Returns:
        Predictions reshaped to a column vector of shape ``(n_samples, 1)``.
    """
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """Train on the California-housing dataset and print MAE / MSE on a held-out split."""
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
619
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class snake_case__:
    """Wrap a torch LR scheduler so it only advances when the optimizers actually stepped.

    Mirrors accelerate's ``AcceleratedScheduler``: under gradient accumulation the
    scheduler must not advance on skipped optimizer steps, and when batches are
    not split across processes it must step ``num_processes`` times per training
    step to keep the learning-rate schedule aligned.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        # Accept a single optimizer or a list/tuple of optimizers.
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        """Step the wrapped scheduler, respecting optimizer-skip and process count."""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
619
1
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class snake_case__(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Unit tests for ``VQModel`` (vector-quantized autoencoder).

    The obfuscated original assigned both class attributes to the same
    identifier (so ``model_class`` was clobbered by ``"sample"``) and named
    every method ``lowercase_`` — the mixin hooks (``dummy_input``,
    ``prepare_init_args_and_inputs_for_common``) and test methods are restored.
    """

    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        # Random float tensor shaped (batch, channels, *sizes) on the test device.
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Intentionally skipped for VQModel (original body was `pass`).
        pass

    def test_training(self):
        # Intentionally skipped for VQModel (original body was `pass`).
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
619
from manim import *


class snake_case__(UpperCAmelCase__):
    """Manim scene animating checkpoint weights moving from CPU/GPU blocks to disk.

    NOTE(review): this obfuscated source is not runnable as-is — the many
    `__lowercase` references below (e.g. the direction argument of `.arrange`)
    and names like `mem`, `cpu`, `gpu`, `fill`, `key`, `disk` are never defined
    in this scope; the original presumably bound them to the rectangles/groups
    created on the preceding lines and to manim direction constants
    (RIGHT/DOWN/...). The original values cannot be recovered from this view,
    so the code is left unchanged — confirm against the upstream accelerate
    manim script before use.
    """

    def lowercase_(self) -> Tuple:
        # Base building blocks: a memory cell, a smaller "meta" cell, and a fill rect.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two columns of 6 cells plus a label.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # GPU block: 4 cells plus a label.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # Model block: 6 cells plus a label.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        # Place a small fill target next to the CPU/model cells for each model cell.
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # Loaded-checkpoint block: 6 cells plus a label.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        # Fill targets for the checkpoint cells, mirrored into the CPU columns.
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Legend ("Key") square with colored bullet labels.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Caption for step 1 of the animation.
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        # Disk block: two columns of 6 "meta" cells plus a label.
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        # Animate each checkpoint cell shrinking onto the disk column.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        # Caption for step 2, then fade everything out.
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
619
1
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__( UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = CodeGenTokenizer SCREAMING_SNAKE_CASE__ : Optional[Any] = CodeGenTokenizerFast SCREAMING_SNAKE_CASE__ : List[Any] = True SCREAMING_SNAKE_CASE__ : List[str] = {"""add_prefix_space""": True} SCREAMING_SNAKE_CASE__ : int = False def lowercase_ ( self ) -> List[str]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ : Union[str, Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] lowerCAmelCase_ : Tuple = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) lowerCAmelCase_ : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase_ : Tuple = {'''unk_token''': '''<unk>'''} lowerCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) def lowercase_ ( self , **__lowercase ) -> List[str]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def lowercase_ ( self , **__lowercase 
) -> Tuple: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def lowercase_ ( self , __lowercase ) -> Tuple: lowerCAmelCase_ : Optional[Any] = '''lower newer''' lowerCAmelCase_ : List[Any] = '''lower newer''' return input_text, output_text def lowercase_ ( self ) -> int: lowerCAmelCase_ : Dict = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase_ : Tuple = '''lower newer''' lowerCAmelCase_ : List[Any] = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowerCAmelCase_ : Any = tokenizer.tokenize(__lowercase , add_prefix_space=__lowercase ) self.assertListEqual(__lowercase , __lowercase ) lowerCAmelCase_ : int = tokens + [tokenizer.unk_token] lowerCAmelCase_ : Union[str, Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def lowercase_ ( self ) -> int: if not self.test_rust_tokenizer: return lowerCAmelCase_ : Tuple = self.get_tokenizer() lowerCAmelCase_ : Dict = self.get_rust_tokenizer(add_prefix_space=__lowercase ) lowerCAmelCase_ : Optional[Any] = '''lower newer''' # Testing tokenization lowerCAmelCase_ : Optional[int] = tokenizer.tokenize(__lowercase , add_prefix_space=__lowercase ) lowerCAmelCase_ : List[Any] = rust_tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) # Testing conversion to ids without special tokens lowerCAmelCase_ : List[Any] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) lowerCAmelCase_ : Tuple = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) self.assertListEqual(__lowercase , __lowercase ) # Testing conversion to ids with special tokens lowerCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=__lowercase ) lowerCAmelCase_ : List[str] = tokenizer.encode(__lowercase , add_prefix_space=__lowercase ) 
lowerCAmelCase_ : Dict = rust_tokenizer.encode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) # Testing the unknown token lowerCAmelCase_ : Any = tokens + [rust_tokenizer.unk_token] lowerCAmelCase_ : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def lowercase_ ( self , *__lowercase , **__lowercase ) -> Optional[Any]: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def lowercase_ ( self , __lowercase=1_5 ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase ) # Simple input lowerCAmelCase_ : Dict = '''This is a simple input''' lowerCAmelCase_ : Dict = ['''This is a simple input 1''', '''This is a simple input 2'''] lowerCAmelCase_ : Tuple = ('''This is a simple input''', '''This is a pair''') lowerCAmelCase_ : Optional[int] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''' ) # Simple input self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' ) # Simple input self.assertRaises( __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , ) # Pair input self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''' ) # Pair input self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' ) 
# Pair input self.assertRaises( __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , ) def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input lowerCAmelCase_ : Optional[Any] = '''This is a simple input''' lowerCAmelCase_ : Union[str, Any] = ['''This is a simple input looooooooong''', '''This is a simple input'''] lowerCAmelCase_ : Optional[Any] = ('''This is a simple input''', '''This is a pair''') lowerCAmelCase_ : Dict = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] lowerCAmelCase_ : Any = tokenizer.pad_token_id lowerCAmelCase_ : str = tokenizer(__lowercase , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' ) lowerCAmelCase_ : List[str] = tokenizer(__lowercase , padding=__lowercase , truncate=__lowercase , return_tensors='''np''' ) lowerCAmelCase_ : Dict = tokenizer(*__lowercase , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' ) lowerCAmelCase_ : Dict = tokenizer(__lowercase , padding=__lowercase , truncate=__lowercase , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in 
out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : int = '''$$$''' lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowercase , add_bos_token=__lowercase ) lowerCAmelCase_ : Optional[Any] = '''This is a simple input''' lowerCAmelCase_ : Optional[int] = ['''This is a simple input 1''', '''This is a simple input 2'''] lowerCAmelCase_ : Optional[int] = tokenizer.bos_token_id lowerCAmelCase_ : List[str] = tokenizer(__lowercase ) lowerCAmelCase_ : List[str] = tokenizer(__lowercase ) self.assertEqual(out_s.input_ids[0] , __lowercase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) lowerCAmelCase_ : List[Any] = tokenizer.decode(out_s.input_ids ) lowerCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __lowercase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : List[str] = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) lowerCAmelCase_ : int = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' lowerCAmelCase_ : Union[str, Any] = '''\nif len_a > len_b: result = a\nelse: result = b''' lowerCAmelCase_ : Dict = tokenizer.encode(__lowercase ) lowerCAmelCase_ : Dict = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] lowerCAmelCase_ : Tuple = tokenizer.decode(__lowercase , truncate_before_pattern=__lowercase ) 
self.assertEqual(__lowercase , __lowercase ) def lowercase_ ( self ) -> Tuple: pass
619
# Conversion table, ordered largest value first so the greedy
# integer -> Roman conversion below can walk it front to back.
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]

# Legacy alias kept for backward compatibility with the original binding.
_UpperCAmelCase = ROMAN


def lowerCAmelCase(lowerCAmelCase_) -> int:
    """Convert a Roman-numeral string to its integer value.

    Subtractive pairs (e.g. "IV", "XC") are detected by comparing each
    symbol's value with its successor's.

    NOTE(review): this definition is shadowed by the next ``def`` of the
    same name (an artifact of the automated rename), so it is unreachable
    via a plain import of this module.
    """
    roman = lowerCAmelCase_
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one means a subtractive pair.
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def lowerCAmelCase(lowerCAmelCase_) -> str:  # noqa: F811 -- shadows the converter above, as in the original
    """Convert a non-negative integer to its Roman-numeral string.

    Greedily takes as many of each symbol as fits, largest value first.
    Returns "" for 0.
    """
    number = lowerCAmelCase_
    result = []
    for arabic, roman in ROMAN:
        # BUGFIX: the original unpacked divmod() into a single repeated
        # mangled name, leaving ``factor``/``number`` undefined (NameError).
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
1
def solution(limit: int = 28_123) -> int:
    """Project Euler 23: sum of all positive integers <= ``limit`` that
    cannot be written as the sum of two abundant numbers.

    An abundant number is one whose proper divisors sum to more than the
    number itself.  The default limit of 28123 is the classical bound above
    which every integer is representable.

    NOTE(review): the original body referenced ``sum_divs``/``abundants``/
    ``res``/``solution`` that the mangled renames never bound (NameError);
    names restored here.  The wrong ``-> str`` annotation is also fixed.
    """
    # sum_divs[n] accumulates the sum of proper divisors of n: every n gets
    # divisor 1 implicitly, and each factor pair (i, n // i) with i >= 2 is
    # added exactly once by the sieve below.
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i  # perfect square: repeated factor counted once
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    # Abundant numbers in ascending order, plus a set for O(1) membership.
    abundant = [n for n in range(1, limit + 1) if sum_divs[n] > n]
    abundant_set = set(abundant)
    # Smallest abundant number (12 whenever limit >= 12); a summand a with
    # a > n - smallest cannot have an abundant complement.
    smallest = abundant[0] if abundant else limit + 1

    total = 0
    for n in range(1, limit + 1):
        representable = False
        # PERF: walk summands in ascending order and stop once the
        # complement would drop below the smallest abundant number, instead
        # of scanning the whole (unordered) abundant set for every n.
        for a in abundant:
            if a > n - smallest:
                break
            if n - a in abundant_set:
                representable = True
                break
        if not representable:
            total += n
    return total


if __name__ == "__main__":
    print(solution())
619
import csv

import tweepy

# Twitter API credentials -- fill these in before running.
# NOTE(review): the original collapsed all four names into a single mangled
# one, which made the function below reference undefined variables; the
# conventional names are restored here.
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent tweets and save them to
    ``new_<screen_name>_tweets.csv`` with columns id, created_at, text.

    Pages backwards through the timeline 200 tweets at a time (the API
    maximum per request) until no more tweets are returned.

    :param screen_name: Twitter handle whose timeline is fetched.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # ROBUSTNESS: the original indexed alltweets[-1] unconditionally and
    # crashed for accounts with no tweets.
    if not alltweets:
        print(f"no tweets found for {screen_name}")
        return

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
619
1
import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate _UpperCAmelCase : Tuple =TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("""""", """|""", """|"""), datarow=DataRow("""""", """|""", """|"""), padding=1, with_header_hide=None, ) _UpperCAmelCase : str =[] _UpperCAmelCase : int =[] _UpperCAmelCase : Any ={"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} _UpperCAmelCase : Union[str, Any] =[ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""", """emoji""": True, }, } ] _UpperCAmelCase : List[str] =0 for log in Path().glob("""*.log"""): _UpperCAmelCase : List[str] =0 with open(log, """r""") as f: for line in f: _UpperCAmelCase : str =json.loads(line) if line.get("""nodeid""", """""") != "": _UpperCAmelCase : Optional[int] =line["""nodeid"""] if line.get("""duration""", None) is not None: _UpperCAmelCase : Tuple =f"""{line['duration']:.4f}""" if line.get("""outcome""", """""") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("""_""")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) _UpperCAmelCase : str =[] log.unlink() _UpperCAmelCase : Any ="""""" _UpperCAmelCase : List[str] =[] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" _UpperCAmelCase : int =[] _UpperCAmelCase : List[Any] ={} for test in failed_tests: _UpperCAmelCase : str =test[0].split("""::""") _UpperCAmelCase : Any =data[0].split("""/""")[-1] if data[0] not in filesafailed: _UpperCAmelCase : Optional[int] =[data[1:]] else: filesafailed[data[0]] 
+= [data[1:]] failed_table.append(data) _UpperCAmelCase : List[str] =[test[0] for test in failed_table] _UpperCAmelCase : Optional[Any] =list(set(files)) # Count number of instances in failed_tests _UpperCAmelCase : Dict =[] for file in individual_files: table.append([file, len(filesafailed[file])]) _UpperCAmelCase : Tuple =tabulate( table, headers=["""Test Location""", """Num Failed"""], tablefmt=hf_table_format, stralign="""right""", ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: _UpperCAmelCase : str ="""Too many failed tests, please see the full report in the Action results.""" _UpperCAmelCase : List[str] =len(err) + 10 _UpperCAmelCase : List[str] =message[: 3000 - offset] + f"""\n...\n```\n{err}""" print(f"""### {message}""") else: _UpperCAmelCase : Optional[Any] ="""No failed tests! 🤗""" print(f"""## {message}""") payload.append(no_error_payload) if os.environ.get("""TEST_TYPE""", """""") != "": from slack_sdk import WebClient _UpperCAmelCase : Any =WebClient(token=os.environ["""SLACK_API_TOKEN"""]) if message != "No failed tests! 
🤗": _UpperCAmelCase : List[str] ={ """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) _UpperCAmelCase : Tuple ={ """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } payload.append(action_button) _UpperCAmelCase : int ={ """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""", } ], } payload.append(date_report) _UpperCAmelCase : List[str] =client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload) _UpperCAmelCase : int =response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name _UpperCAmelCase : Optional[int] ="""""" for i, row in enumerate(test_failures): if row[0] != test_class: _UpperCAmelCase : str =row[0] else: _UpperCAmelCase : Union[str, Any] ="""""" _UpperCAmelCase : int ={ """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""", }, } client.chat_postMessage( channel="""#accelerate-ci-daily""", thread_ts=ts, blocks=[payload], )
619
from math import sqrt def lowerCAmelCase ( lowerCAmelCase_ )-> bool: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( number >= 0 ), "'number' must been an int and positive" lowerCAmelCase_ : str = True # 0 and 1 are none primes. if number <= 1: lowerCAmelCase_ : List[Any] = False for divisor in range(2 , int(round(sqrt(lowerCAmelCase_ ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCAmelCase_ : Any = False break # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'status' must been from type bool" return status def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCAmelCase_ : Optional[Any] = list(range(2 , n + 1 ) ) lowerCAmelCase_ : List[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowerCAmelCase_ ) ): for j in range(i + 1 , len(lowerCAmelCase_ ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCAmelCase_ : Tuple = 0 # filters actual prime numbers. 
lowerCAmelCase_ : List[str] = [x for x in begin_list if x != 0] # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> int: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2" lowerCAmelCase_ : List[str] = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(lowerCAmelCase_ ): ans.append(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and number >= 0, "'number' must been an int and >= 0" lowerCAmelCase_ : Union[str, Any] = [] # this list will be returns of the function. # potential prime number factors. lowerCAmelCase_ : Any = 2 lowerCAmelCase_ : List[str] = number if number == 0 or number == 1: ans.append(lowerCAmelCase_ ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowerCAmelCase_ ): while quotient != 1: if is_prime(lowerCAmelCase_ ) and (quotient % factor == 0): ans.append(lowerCAmelCase_ ) quotient /= factor else: factor += 1 else: ans.append(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase_ : Union[str, Any] = 0 # prime factorization of 'number' lowerCAmelCase_ : Optional[int] = prime_factorization(lowerCAmelCase_ ) lowerCAmelCase_ : Dict = max(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> str: assert 
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase_ : Union[str, Any] = 0 # prime factorization of 'number' lowerCAmelCase_ : int = prime_factorization(lowerCAmelCase_ ) lowerCAmelCase_ : Union[str, Any] = min(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> Dict: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int" assert isinstance(number % 2 == 0 , lowerCAmelCase_ ), "compare bust been from type bool" return number % 2 == 0 def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int" assert isinstance(number % 2 != 0 , lowerCAmelCase_ ), "compare bust been from type bool" return number % 2 != 0 def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]: assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ ) ), "'number' must been an int, even and > 2" lowerCAmelCase_ : Union[str, Any] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowerCAmelCase_ : List[Any] = get_prime_numbers(lowerCAmelCase_ ) lowerCAmelCase_ : Any = len(lowerCAmelCase_ ) # run variable for while-loops. lowerCAmelCase_ : List[Any] = 0 lowerCAmelCase_ : List[Any] = None # exit variable. for break up the loops lowerCAmelCase_ : int = True while i < len_pn and loop: lowerCAmelCase_ : Tuple = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCAmelCase_ : Union[str, Any] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (len(lowerCAmelCase_ ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. 
And sum of elements must been eq 'number'" return ans def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]: assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase_ : List[str] = 0 while numbera != 0: lowerCAmelCase_ : int = numbera % numbera lowerCAmelCase_ : Union[str, Any] = numbera lowerCAmelCase_ : Tuple = rest # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any: assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase_ : Dict = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCAmelCase_ : Tuple = prime_factorization(lowerCAmelCase_ ) lowerCAmelCase_ : str = prime_factorization(lowerCAmelCase_ ) elif numbera == 1 or numbera == 1: lowerCAmelCase_ : List[Any] = [] lowerCAmelCase_ : List[Any] = [] lowerCAmelCase_ : str = max(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : str = 0 lowerCAmelCase_ : List[str] = 0 lowerCAmelCase_ : Any = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCAmelCase_ : str = prime_fac_a.count(lowerCAmelCase_ ) lowerCAmelCase_ : List[str] = prime_fac_a.count(lowerCAmelCase_ ) for _ in range(max(lowerCAmelCase_ , lowerCAmelCase_ ) ): ans *= n else: lowerCAmelCase_ : Dict = prime_fac_a.count(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ): ans *= n done.append(lowerCAmelCase_ ) # iterates through primeFac2 for n in prime_fac_a: if 
n not in done: lowerCAmelCase_ : Optional[int] = prime_fac_a.count(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ): ans *= n done.append(lowerCAmelCase_ ) # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> Dict: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int" lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : Optional[int] = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowerCAmelCase_ ): ans += 1 # precondition assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and is_prime( lowerCAmelCase_ ), "'ans' must been a prime number and from type int" return ans def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict: assert ( is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowerCAmelCase_ : str = p_number_a + 1 # jump to the next number lowerCAmelCase_ : Dict = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowerCAmelCase_ ): number += 1 while number < p_number_a: ans.append(lowerCAmelCase_ ) number += 1 # fetch the next prime number. while not is_prime(lowerCAmelCase_ ): number += 1 # precondition assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ans[0] != p_number_a and ans[len(lowerCAmelCase_ ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 1), "'n' must been int and >= 1" lowerCAmelCase_ : List[str] = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(lowerCAmelCase_ ) # precondition assert ans[0] == 1 and ans[len(lowerCAmelCase_ ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and ( number > 1 ), "'number' must been an int and >= 1" lowerCAmelCase_ : Tuple = get_divisors(lowerCAmelCase_ ) # precondition assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (divisors[0] == 1) and (divisors[len(lowerCAmelCase_ ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any: assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCAmelCase_ : List[str] = gcd(abs(lowerCAmelCase_ ) , abs(lowerCAmelCase_ ) ) # precondition assert ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been a int and >= 0" lowerCAmelCase_ : Tuple = 1 # this will be return. 
for factor in range(1 , n + 1 ): ans *= factor return ans def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : Optional[Any] = 1 lowerCAmelCase_ : Tuple = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase_ : Any = ans ans += fiba lowerCAmelCase_ : Dict = tmp return ans
619
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger for this configuration file (binding name mangled by
# the automated rename).
_UpperCAmelCase : Tuple =logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config.json URL.
_UpperCAmelCase : Optional[Any] ={
    """uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}


class snake_case__( UpperCAmelCase__ ):
    '''Configuration class for the MRA model (``model_type == "mra"``).

    Stores the hyperparameters that instantiate an MRA model; the defaults
    appear to mirror the uw-madison/mra-base-512-4 checkpoint -- TODO
    confirm against the published config.

    NOTE(review): an automated rename collapsed every ``__init__``
    parameter into the single name ``__lowercase`` (a duplicate-argument
    SyntaxError) while the body still references the intended original
    names (``vocab_size``, ``hidden_size``, ...).  The original parameter
    names must be restored for this class to be importable.
    '''

    # Identifier used by the auto-config machinery.
    SCREAMING_SNAKE_CASE__ : List[Any] = """mra"""

    def __init__( self , __lowercase=5_0_2_6_5 , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase="absolute" , __lowercase=4 , __lowercase="full" , __lowercase=0 , __lowercase=0 , __lowercase=1 , __lowercase=0 , __lowercase=2 , **__lowercase , ) -> List[str]:
        # Special-token ids are forwarded to the PretrainedConfig base.
        super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
        # Embedding / transformer-stack sizes.
        lowerCAmelCase_ : Union[str, Any] = vocab_size
        lowerCAmelCase_ : List[Any] = max_position_embeddings
        lowerCAmelCase_ : Optional[Any] = hidden_size
        lowerCAmelCase_ : Any = num_hidden_layers
        lowerCAmelCase_ : Dict = num_attention_heads
        lowerCAmelCase_ : Optional[Any] = intermediate_size
        lowerCAmelCase_ : Optional[Any] = hidden_act
        # Regularisation.
        lowerCAmelCase_ : List[str] = hidden_dropout_prob
        lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
        # Initialisation / normalisation.
        lowerCAmelCase_ : Tuple = initializer_range
        lowerCAmelCase_ : Optional[int] = type_vocab_size
        lowerCAmelCase_ : Optional[int] = layer_norm_eps
        lowerCAmelCase_ : Any = position_embedding_type
        # MRA-specific attention-approximation knobs.
        lowerCAmelCase_ : List[str] = block_per_row
        lowerCAmelCase_ : List[str] = approx_mode
        lowerCAmelCase_ : Optional[Any] = initial_prior_first_n_blocks
        lowerCAmelCase_ : Tuple = initial_prior_diagonal_n_blocks
619
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Below this window size the ternary searches fall back to a linear scan.
precision = 10

# Legacy alias kept for backward compatibility with the original binding
# (the functions below reference the lowercase name).
_UpperCAmelCase : Tuple = precision


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search of ``array`` over the inclusive index range [left, right].

    Returns the index of ``target``, or -1 if absent.
    """
    # BUGFIX: the range is inclusive of ``right`` so the last candidate of a
    # narrowed ternary window is not skipped (the original excluded it).
    for i in range(left, right + 1):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search of a sorted ``array`` for ``target``.

    Returns the index of ``target``, or -1 if absent.
    """
    left = 0
    # BUGFIX: search the inclusive range [0, len(array) - 1]; the original
    # started with right = len(array), an out-of-bounds index.
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            # Window is small: a linear scan is cheaper and simpler.
            return lin_search(left, right, array, target)
        # BUGFIX: cut points must be computed relative to the current
        # window.  The original used (left + right) // 3 + 1, which for
        # windows away from index 0 can fall outside [left, right] and
        # raised IndexError (e.g. searching for the last element of
        # list(range(45))).
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of sorted ``array`` over [left, right] inclusive.

    Returns the index of ``target``, or -1 if absent.
    """
    # BUGFIX: accept left == right so a one-element window is still checked
    # (the original returned -1 without looking at it).
    if left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        # Window-relative cut points (see ite_ternary_search for the fix).
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f"Iterative search: {target} found at positions: {resulta}")
        print(f"Recursive search: {target} found at positions: {resultb}")
    else:
        print("Not found")
619
1
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

# NOTE(review): an automated rewrite had reduced this to an unused throwaway name;
# kept as an (unused) module flag rather than guessing its original target — confirm
# against the upstream file.
_UpperCAmelCase = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    """CPU smoke tests for VQDiffusionPipeline built from tiny dummy components."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        # size of the VQ codebook shared by the dummy VQModel, transformer and scheduler
        return 1_2

    @property
    def num_embeds_ada_norm(self):
        return 1_2

    @property
    def text_embedder_hidden_size(self):
        return 3_2

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[3_2, 6_4],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=3_7,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_0_0_0,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 1_2
        width = 1_2

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 3_2,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 3_2,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        # classifier-free sampling embeddings are fixed (not learnable) in this test
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        # same seed again so the tuple-return path must reproduce the dict-return path
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 2_4, 2_4, 3)

        expected_slice = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        # learnable embeddings: hidden_size/length are only meaningful when learnable=True
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 2_4, 2_4, 3)

        expected_slice = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])

        # NOTE(review): the 2.0 tolerance below comes from the source as found and is
        # effectively a no-op for images in [0, 1] — TODO confirm and tighten upstream.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image).max() < 2.0
619
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _UpperCAmelCase : Union[str, Any] ={ """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Optional[Any] =["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Optional[int] =["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Tuple =[ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys _UpperCAmelCase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
619
1
import collections
import inspect
import unittest

from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        FocalNetBackbone,
        FocalNetForImageClassification,
        FocalNetForMaskedImageModeling,
        FocalNetModel,
    )
    from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FocalNetModelTester:
    """Builds tiny FocalNet configs/inputs and runs per-head output checks for the test class."""

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_2,
        patch_size=2,
        num_channels=3,
        embed_dim=1_6,
        hidden_sizes=[3_2, 6_4, 1_2_8],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=1_0,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): `path_norm=` (not `patch_norm=`) is preserved from the source as
        # found — verify against FocalNetConfig's actual keyword before "fixing" it.
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # each stage halves the spatial resolution, hence the 4** factor on patches
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=3_7, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # common-property checks are handled elsewhere for this model
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        # [:-1] skips FocalNetBackbone, which has no input/output embeddings
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        # flatten the spatial dims back to a sequence and compare with hidden_states
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # patch size that does not divide the image size, forcing padding
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # was assertTrue(value, 281), which always passed — assertEqual checks the label
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 2_8_1)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
619
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

# NOTE(review): an automated rewrite had reduced this to an unused throwaway name;
# kept as an (unused) module flag rather than guessing its original target — confirm
# against the upstream file.
_UpperCAmelCase = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    """CPU smoke tests for VQDiffusionPipeline built from tiny dummy components."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        # size of the VQ codebook shared by the dummy VQModel, transformer and scheduler
        return 1_2

    @property
    def num_embeds_ada_norm(self):
        return 1_2

    @property
    def text_embedder_hidden_size(self):
        return 3_2

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[3_2, 6_4],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=3_7,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_0_0_0,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 1_2
        width = 1_2

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 3_2,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 3_2,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        # classifier-free sampling embeddings are fixed (not learnable) in this test
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        # same seed again so the tuple-return path must reproduce the dict-return path
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 2_4, 2_4, 3)

        expected_slice = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        # learnable embeddings: hidden_size/length are only meaningful when learnable=True
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 2_4, 2_4, 3)

        expected_slice = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])

        # NOTE(review): the 2.0 tolerance below comes from the source as found and is
        # effectively a no-op for images in [0, 1] — TODO confirm and tighten upstream.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image).max() < 2.0
619
1
import torch

from diffusers import DiffusionPipeline


class snake_case__(DiffusionPipeline):
    """Minimal custom pipeline: performs a single denoising step and forces the output to all ones.

    Fixes vs. the previous revision: `__init__` had two parameters with the same name
    (a SyntaxError), `__call__` referenced an undefined name, and the base class name
    was undefined even though `DiffusionPipeline` was imported.
    """

    def __init__(self, unet, scheduler) -> None:
        """Register the UNet and scheduler so the pipeline can be saved/loaded."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        """Run one UNet forward pass + one scheduler step; return an all-ones tensor.

        The `x - x + ones_like(x)` construction is deliberate: it exercises the full
        denoising path while producing a deterministic result (a test fixture).
        """
        # Random starting latents matching the UNet's configured sample shape.
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # scheduler_output - scheduler_output == 0, so the result is all ones.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
619
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class snake_case__(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer (backed by the `tokenizers` library).

    Fixes vs. the previous revision: module constants were assigned to a throwaway
    name while the class body referenced `VOCAB_FILES_NAMES` etc.; `__init__` used
    duplicate parameter names and never set `self.vocab_file` /
    `self.can_save_slow_tokenizer`, which `save_vocabulary` reads; the three methods
    were all named `lowercase_`, shadowing each other and the base-class hooks they
    are meant to override.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-created if the sentencepiece model file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for sequence A (+ its sep), 1 for sequence B (+ its sep), 2 for cls."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
619
1
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class snake_case__(PreTrainedTokenizer):
    """SentencePiece tokenizer for the GPT-SW3 models.

    Fixes vs. the previous revision: instance attributes (`sp_model`, `vocab_file`,
    `whitespaces`, `non_printing_characters_re`, ...) were assigned to throwaway
    locals although other methods read them from `self`; every method was named
    `lowercase_`, shadowing all but the last; `map(chr, ...)` had its callable
    replaced with an undefined name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts.
        # NOTE(review): upstream uses distinct Unicode whitespace codepoints here; several appear to
        # have been collapsed to plain spaces in transit -- confirm against the original file.
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self) -> Dict[str, Any]:
        # SentencePieceProcessor is not picklable: drop it here, reload it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, map exotic whitespace to ' ', then NFC-normalize."""
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an id (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Overridden to disable the default clean-up; returns the input unchanged."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join a token sequence back into text, decoding sub-token runs with SentencePiece."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc
                # by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the sentencepiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode text directly with SentencePiece (no special-token machinery)."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode ids directly with SentencePiece."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Format a chat as ``<eos><bos>User: ...<bos>Bot: ...<bos>Bot:`` and encode it."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
619
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> "qiskit.result.counts.Counts":
    """Simulate a quantum full adder on the two input bits plus a carry-in bit.

    Each input may be 0, 1, or 2 (2 puts the corresponding qubit into superposition
    via a Hadamard gate). Returns the measurement counts of (sum, carry_out).

    Fixes vs. the previous revision: the three parameters all shared one name
    (a SyntaxError) and the `__main__` guard called `quantum_full_adder`, which
    was never defined.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
619
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class snake_case__(PretrainedConfig):
    """Configuration for the DETA detection model.

    Fixes vs. the previous revision: `__init__` assigned every configuration value to
    a throwaway local although `to_dict` and the properties read them from `self`
    (`self.backbone_config`, `self.d_model`, ...); the class attributes `model_type`
    and `attribute_map` shared one mangled name; the base class name was undefined
    while `PretrainedConfig` was imported.
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ) -> None:
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                # Rebuild the proper config class from a plain dict (e.g. loaded from JSON).
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
619
import re


def indian_phone_validator(phone: str) -> bool:
    """Return True if *phone* is a plausible Indian mobile number.

    Accepts an optional "+91" country code (optionally followed by a dash or a
    space), an optional leading "0", an optional "91" prefix, then a 10-digit
    subscriber number starting with 7, 8 or 9.

    Fixes vs. the previous revision: the function body compared against the
    undefined name `phone` (NameError on every successful match), and the
    `__main__` guard called `indian_phone_validator`, which was never defined.
    """
    pattern = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    # The pattern is anchored at both ends, so search() behaves like fullmatch().
    return pattern.search(phone) is not None


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
619
1
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints

import yaml


DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    """Parse a truthy/falsy CLI string into a bool (argparse `type=` callable)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map a CLI string back onto the matching choice object (falls through unchanged)."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Convenience wrapper around `dataclasses.field` that records aliases/help in metadata."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class snake_case__(ArgumentParser):
    """ArgumentParser that builds its arguments from dataclass type hints.

    Fixes vs. the previous revision: `__init__` never assigned
    `self.dataclass_types` although every parse method iterates it; the module
    helpers were referenced as `string_to_bool` / `make_choice_type_function` but
    defined under one shadowing name; all methods were named `lowercase_`,
    shadowing each other.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        """Translate one dataclass field into a `parser.add_argument` call."""
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        """Resolve a dataclass's type hints and register each init field as an argument."""
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        """Parse CLI args (optionally merged with *.args files) into dataclass instances."""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """Instantiate the dataclasses from a plain dict; reject unused keys unless allowed."""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """Load a JSON file and delegate to `parse_dict`."""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """Load a YAML file and delegate to `parse_dict`."""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
619
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class snake_case__(BaseImageProcessor):
    """Image processor that resizes, rescales and normalizes images into ``pixel_values``.

    For a requested ``shortest_edge`` below 384 the image is first resized to
    ``shortest_edge / crop_pct`` (preserving aspect ratio) and then center-cropped to
    ``(shortest_edge, shortest_edge)``; at 384 or larger the image is warped directly
    to a square without cropping.

    NOTE(review): parameter and method names below were reconstructed from the mangled
    source's own body references (e.g. ``self.resize`` is called inside ``preprocess``);
    they match the upstream ConvNext-style image processors — confirm against the
    original module.
    """

    # ``model_input_names`` is read by the ``BaseImageProcessor`` machinery.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """Store the default preprocessing configuration.

        Args:
            do_resize: whether to resize images by default.
            size: target size dict; defaults to ``{"shortest_edge": 384}``.
            crop_pct: crop percentage used when ``shortest_edge < 384``; defaults to
                ``224 / 256`` for backwards compatibility when the config stores ``None``.
            resample: default PIL resampling filter for resizing.
            do_rescale / rescale_factor: whether/how to rescale pixel values.
            do_normalize / image_mean / image_std: whether/how to normalize; mean and
                std default to the ImageNet standard values.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image``; below 384 px also center-crop to a square of ``shortest_edge``.

        Raises:
            ValueError: if ``size`` has no ``"shortest_edge"`` key.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. ``1/255``)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` to ``(image - mean) / std`` per channel."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline (resize -> rescale -> normalize) over one or more images.

        Any argument left as ``None`` falls back to the instance default set in
        ``__init__``. Returns a :class:`BatchFeature` with key ``"pixel_values"``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # BUGFIX: the original `do_resize and size is None or resample is None` parsed as
        # `(do_resize and size is None) or resample is None`, raising whenever resample
        # was None even with do_resize disabled. Parenthesized to match the message.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
619
1
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class snake_case__(TestCase):
    """Tests for ``RagTokenizer`` save/load round-trips and pretrained loading.

    NOTE(review): method and attribute names were reconstructed from the mangled
    source (the original turned ``self.vocab_file``/``self.merges_file`` assignments
    into dead locals, leaving undefined references) — confirm against the upstream
    RAG tokenization test module.
    """

    def setUp(self) -> str:
        """Write minimal DPR (WordPiece) and BART (BPE) tokenizer fixtures to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok: tiny WordPiece vocab, one token per line.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok: tiny byte-level BPE vocab (JSON) plus merge rules.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        """Load the DPR question-encoder tokenizer written by ``setUp``."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        """Load the BART tokenizer written by ``setUp``."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self) -> None:
        """Remove the temporary fixture directory."""
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        """Save a RagTokenizer+RagConfig pair and verify the reload matches token-for-token."""
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        # Reloading under @require_tokenizers yields the fast variants.
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        """Smoke-test tokenizing a batch of questions with the rag-token-nq tokenizer."""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        """Smoke-test tokenizing a batch of questions with the rag-sequence-nq tokenizer."""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
619
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE: in the mangled source this map and the logger shared one name, so the map
# clobbered the logger. Restored to distinct, conventional names.
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class snake_case__(PretrainedConfig):
    """Configuration for GPT-NeoX Japanese models.

    Stores hyperparameters such as vocabulary size, hidden size and rotary-embedding
    settings; token ids are forwarded to ``PretrainedConfig``.

    NOTE(review): parameter names were reconstructed from the mangled source's own
    right-hand-side references (the obfuscated signature was a duplicate-argument
    SyntaxError) — confirm against the upstream GPTNeoXJapaneseConfig.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ) -> str:
        """Initialize the config; unknown kwargs are handled by ``PretrainedConfig``."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Feed-forward width is expressed as a multiple of the hidden size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        # Fraction of head dimensions that receive rotary position embeddings.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
619
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class snake_case__(PretrainedConfig):
    """Configuration for LLaMA models.

    Holds architecture hyperparameters (sizes, head counts, RoPE scaling) and
    validates the ``rope_scaling`` dict at construction time.

    NOTE(review): parameter names were reconstructed from the mangled source's own
    right-hand-side references (the obfuscated signature was a duplicate-argument
    SyntaxError, and the validation helper was defined under a name different from
    the one ``__init__`` calls) — confirm against the upstream LlamaConfig.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ) -> None:
        """Initialize the config; ``rope_scaling`` is validated before delegating up."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: models predating grouped-query attention
        # used one KV head per attention head.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate that ``rope_scaling`` is ``None`` or a ``{"type", "factor"}`` dict.

        Raises:
            ValueError: if the dict shape, the ``type`` value, or the ``factor`` value
                is invalid.
        """
        if self.rope_scaling is None:
            return

        # BUGFIX: the original messages named the fields `name`/`factor` while the
        # code reads `type`/`factor`; messages corrected to match the code.
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
619
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]: lowerCAmelCase_ : str = parent lowerCAmelCase_ : Optional[Any] = batch_size lowerCAmelCase_ : List[Any] = is_training lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss lowerCAmelCase_ : List[Any] = num_queries lowerCAmelCase_ : str = num_channels lowerCAmelCase_ : Dict = min_size lowerCAmelCase_ : List[str] = max_size lowerCAmelCase_ : Any = num_labels lowerCAmelCase_ : str = mask_feature_size def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowercase ) lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase ) lowerCAmelCase_ : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5 ).float() lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long() 
lowerCAmelCase_ : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> List[str]: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs() lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def lowercase_ ( self , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int: with torch.no_grad(): lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase ) model.to(__lowercase ) model.eval() lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, 
self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowercase , __lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase ) model.to(__lowercase ) model.eval() def comm_check_on_output(__lowercase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Any = model(__lowercase ) comm_check_on_output(__lowercase ) lowerCAmelCase_ : List[Any] = model( pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) comm_check_on_output(__lowercase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Tuple 
= ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = False def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Any = MaskFormerModelTester(self ) lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def lowercase_ ( self ) -> Any: self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def lowercase_ ( self ) -> str: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def lowercase_ ( self ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase_ ( self ) -> Dict: pass def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: lowerCAmelCase_ : Tuple = model_class(__lowercase ) lowerCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : str = [*signature.parameters.keys()] lowerCAmelCase_ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) @slow def lowercase_ ( self ) -> Optional[int]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2 lowerCAmelCase_ : List[Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(), } lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase ) lowerCAmelCase_ : Dict = model(**__lowercase ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> int: lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase ) lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> List[str]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : int = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ 
, lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Optional[Any] = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss loss.backward() def lowercase_ ( self ) -> Optional[int]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : Any = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Any = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowercase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _UpperCAmelCase : Dict =1E-4 def lowerCAmelCase ( )-> Any: lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class snake_case__( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ) -> Union[str, Any]: return ( 
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase ) lowerCAmelCase_ : Dict = self.default_image_processor lowerCAmelCase_ : int = prepare_img() lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**__lowercase ) lowerCAmelCase_ : Union[str, Any] = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : List[Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : int = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : Tuple = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' 
).to(__lowercase ) lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : Dict = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Tuple = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : Dict = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : int = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size 
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : List[str] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : int = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Dict = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : str = self.default_image_processor lowerCAmelCase_ : Union[str, Any] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase ) lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']] lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']] with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) self.assertTrue(outputs.loss is not None )
619
1
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device

# NOTE(review): unused module-level flag kept for compatibility with the original file.
_UpperCAmelCase = False


class snake_case__(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class snake_case__(unittest.TestCase):  # noqa: F811 -- intentionally rebinds the placeholder above, as in the original
    """Nightly GPU integration tests for the Versatile Diffusion text-to-image pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        """Saving and reloading the pruned pipeline must reproduce the same images."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # reseed so both runs start from the same noise
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        """Full fp16 inference must match a pre-recorded image slice."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
619
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class snake_case__(TaskTemplate):
    """Task template for automatic speech recognition (audio -> transcription)."""

    # `task` is serialised even when left at its default value.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's own Audio feature.

        Raises:
            ValueError: if ``features`` lacks the audio column or it is not an Audio type.
        """
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema[self.audio_column] = features[self.audio_column]
        # frozen dataclass: mutate through __dict__ on the deep copy
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the template's canonical names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
1
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    """Map original MSN checkpoint parameter names to the HF ViT(MSN) layout."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        # NOTE(review): destination key names restored from the standard HF ViT layout -- confirm against ViTMSNModel
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the supervised classification head (unused by the base ViTMSNModel)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    """Drop the MSN projection head weights.

    The projection head is used in the self-supervised pre-training in MSN;
    for downstream tasks it's not needed.
    """
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an MSN checkpoint, convert it to a ViTMSNModel and save model + processor."""
    config = ViTMSNConfig()
    config.num_labels = 1_000

    # NOTE(review): newer hub versions expect repo_id="huggingface/label-files" + repo_type="dataset" -- confirm
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # architecture hyper-parameters are encoded in the checkpoint URL
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
619
# Frozensets of pipeline call-argument names, grouped by pipeline family
# (text-to-image, image variation, inpainting, class-conditioned, audio, token-based).
# NOTE(review): every constant is rebound to the same name `_UpperCAmelCase`, so only the
# last binding survives at runtime -- each was presumably a distinct named constant
# (e.g. TEXT_TO_IMAGE_PARAMS / TEXT_TO_IMAGE_BATCH_PARAMS); confirm against the original file.

# Text-guided image generation: full call-argument set, then batch-varying args.
_UpperCAmelCase : int = frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : List[Any] = frozenset(["""prompt""", """negative_prompt"""])

# Unconditional image generation: no call arguments, no batch arguments.
_UpperCAmelCase : Dict = frozenset([])
_UpperCAmelCase : int = frozenset(["""image"""])

# Image variation (image in, image out).
_UpperCAmelCase : Tuple = frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int = frozenset(["""image"""])

# Text-guided image-to-image.
_UpperCAmelCase : str = frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int = frozenset(["""prompt""", """image""", """negative_prompt"""])

_UpperCAmelCase : Optional[int] = frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])

_UpperCAmelCase : Optional[Any] = frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] = frozenset(["""image""", """mask_image"""])

# Example-guided inpainting (e.g. Paint-by-Example).
_UpperCAmelCase : Union[str, Any] = frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple = frozenset(["""example_image""", """image""", """mask_image"""])

# Class-conditioned generation.
_UpperCAmelCase : Any = frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] = frozenset(["""class_labels"""])

# Unconditional generation driven only by batch size.
_UpperCAmelCase : int = frozenset(["""batch_size"""])
_UpperCAmelCase : str = frozenset([])
_UpperCAmelCase : str = frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] = frozenset([])

# Text-to-audio generation.
_UpperCAmelCase : Tuple = frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple = frozenset(["""prompt""", """negative_prompt"""])

# Token-stream driven pipelines (e.g. spectrogram diffusion).
_UpperCAmelCase : List[str] = frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] = frozenset(["""input_tokens"""])
619
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Public submodule -> symbol map consumed by _LazyModule below.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: the modeling symbols are simply not exported without it
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers get real imports; runtime gets the lazy proxy below.
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports symbols on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
def solution(lowerCAmelCase_: int = 1_000_000) -> int:
    """Project Euler 14: return the starting number below ``lowerCAmelCase_`` that
    produces the longest Collatz chain (length counted including both endpoints).

    Improvement over the original: every number on a freshly walked path is
    memoised (not just the starting number), so later walks stop early.
    """
    counters = {1: 1}  # number -> length of its Collatz chain
    largest_number = 1
    pre_counter = 1  # chain length of `largest_number`
    for start in range(2, lowerCAmelCase_):
        path = []
        number = start
        # walk until we hit a number whose chain length is already known
        while number not in counters:
            path.append(number)
            number = number // 2 if number % 2 == 0 else 3 * number + 1
        # back-fill chain lengths for every number on the newly walked path
        base = counters[number]
        for offset, value in enumerate(reversed(path), start=1):
            counters[value] = base + offset
        if counters[start] > pre_counter:
            largest_number = start
            pre_counter = counters[start]
    return largest_number


# Backwards-compatible alias for the original (obfuscated) name.
lowerCAmelCase = solution

if __name__ == "__main__":
    print(solution(int(input().strip())))
619
1
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    """Numerically stable softmax over the last axis of ``outputs``."""
    # subtract the row max before exponentiating to avoid overflow
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


# Backwards-compatible alias for the original (obfuscated) name.
lowerCAmelCase = softmax


class snake_case__(Pipeline):
    """Pair-classification pipeline: classify a text, optionally paired with a second text.

    Implements the four hooks the `Pipeline` base class dispatches to:
    `_sanitize_parameters`, `preprocess`, `_forward`, `postprocess`.
    """

    def _sanitize_parameters(self, **kwargs):
        # Route the optional `second_text` call argument to preprocess().
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
619
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)
# Keep the obfuscated module-level name bound as well, in case it is referenced elsewhere.
_UpperCAmelCase = logger


class snake_case__(PretrainedConfig):
    """Configuration for a composite encoder-decoder model.

    Holds two nested `PretrainedConfig`s (``encoder`` and ``decoder``) built via
    `AutoConfig.for_model` from the config dicts passed at construction time.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # imported here to avoid a circular import at module load time
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate from two existing configs, marking the decoder for cross-attention."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialise, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
619
1
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch BertModel's weights as a TF1 checkpoint under ``ckpt_dir``.

    Variable names are rewritten to the original google-research/bert layout and
    dense/attention kernels are transposed to TF's (in, out) convention.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # apply the substitutions in order; "." -> "/" must run after the *.weight rules
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: parse arguments, load the PyTorch model and convert it."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
619
from __future__ import annotations from math import pi def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]: if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if inductance < 0: raise ValueError('''Inductance cannot be negative''' ) if frequency < 0: raise ValueError('''Frequency cannot be negative''' ) if reactance < 0: raise ValueError('''Inductive reactance cannot be negative''' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
619
1
from __future__ import annotations


def lowerCAmelCase(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Apply the mass-action law n * p = n_i^2 to compute whichever concentration is 0.

    Exactly one of the three arguments must be 0; its name and computed value
    are returned as a (name, value) tuple.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    elif hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    elif intrinsic_conc == 0:
        return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)
    else:
        # unreachable: the count() guard above guarantees one branch matched
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

_UpperCAmelCase = logging.get_logger(__name__)
# Conventional name for the module logger.
logger = _UpperCAmelCase


class snake_case__(Enum):
    """Named learning-rate schedules resolvable by `lowerCAmelCase` (get_scheduler)."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


# The scheduler factory below refers to the enum by this conventional name.
SchedulerType = snake_case__


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Constant LR equal to the optimizer's initial LR."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Linear warmup from 0 to the initial LR, then constant."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant LR multiplier.

    ``step_rules`` looks like ``"1:10,0.1:20,0.01"``: multiplier before each step
    threshold, with the trailing value used for all later steps.
    NOTE(review): thresholds are treated as absolute step counts -- confirm intent.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup, then linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup, then cosine decay over ``num_cycles`` half-waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then cosine decay with ``num_cycles`` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the initial LR down to ``lr_end``."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def lowerCAmelCase(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified scheduler factory: build the schedule named ``name`` for ``optimizer``.

    Raises:
        ValueError: when a schedule requires ``num_warmup_steps`` or
            ``num_training_steps`` and the argument was not provided.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )


# Conventional public name for the factory above.
get_scheduler = lowerCAmelCase
619
1
import math


class SelfOrganizingMap:
    """A minimal Kohonen self-organizing map with exactly two clusters.

    ``weights`` is a 2 x n matrix (one weight vector per cluster); training
    pulls the winning cluster's weight vector toward each sample.
    """

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index (0 or 1) of the cluster whose weight vector is
        closest to ``sample`` in squared Euclidean distance.

        BUG FIX: the original accumulated both distances into a single
        variable and compared it with itself (``da > da``), so cluster 1
        always won regardless of the input.
        """
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 <= d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move cluster ``j``'s weight vector toward ``sample`` by learning
        rate ``alpha`` and return the (mutated) weights matrix."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    """Train the map on four samples for a few epochs, then classify a test
    sample and print the winning cluster and the trained weights."""
    # Training examples (m samples of dimension n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # Weight initialization (2 clusters x n components)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # BUG FIX: the original instantiated `SelfOrganizingMap` while the class
    # was named differently, and `__main__` called a nonexistent `main()`.
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # Pick the training sample, find its winning cluster, update it.
            sample = training_samples[j]
            winner = self_organizing_map.get_winner(weights, sample)
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # Classify a test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # Results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
619
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Solve the mass-action law ``n_i**2 = n * p`` of a semiconductor.

    Exactly one of the three concentrations must be given as 0 (the unknown);
    the function returns ``(name_of_unknown, computed_value)``.

    BUG FIX: the original signature repeated the same parameter name three
    times (a SyntaxError) while the body read ``electron_conc`` /
    ``hole_conc`` / ``intrinsic_conc``; the real names are restored here.

    Raises:
        ValueError: if zero or more than one value is 0, or any value is
            negative.

    >>> carrier_concentration(25, 100, 0)
    ('intrinsic_conc', 50.0)
    >>> carrier_concentration(0, 1600, 200)
    ('electron_conc', 25.0)
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative in a semiconductor''')
    elif hole_conc < 0:
        raise ValueError('''Hole concentration cannot be negative in a semiconductor''')
    elif intrinsic_conc < 0:
        raise ValueError(
            '''Intrinsic concentration cannot be negative in a semiconductor''')
    elif electron_conc == 0:
        # n = n_i**2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i**2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
1
# Configuration classes for the Table Transformer model (a DETR-style
# detection model) plus its ONNX export configuration.
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


_UpperCAmelCase : int =logging.get_logger(__name__)

# Pretrained checkpoint name -> hosted config.json URL.
_UpperCAmelCase : Optional[Any] ={
    """microsoft/table-transformer-detection""": (
        """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
    ),
}


class snake_case__( UpperCAmelCase__ ):
    """Model configuration for Table Transformer.

    NOTE(review): obfuscation has replaced every ``__init__`` parameter name
    with the duplicated placeholder ``__lowercase`` while the body still reads
    the original names (``backbone_config``, ``use_timm_backbone``, ``d_model``,
    ...). As written this cannot compile; the original parameter names need to
    be restored before this block is usable.
    """

    # model_type identifier used by the auto classes
    SCREAMING_SNAKE_CASE__ : str = """table-transformer"""
    # keys dropped when diffing configs at inference time
    SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
    # attribute_map: generic config attribute -> model-specific attribute
    SCREAMING_SNAKE_CASE__ : Tuple = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__( self , __lowercase=True , __lowercase=None , __lowercase=3 , __lowercase=1_0_0 , __lowercase=6 , __lowercase=2_0_4_8 , __lowercase=8 , __lowercase=6 , __lowercase=2_0_4_8 , __lowercase=8 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=True , __lowercase="relu" , __lowercase=2_5_6 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , __lowercase=False , __lowercase="sine" , __lowercase="resnet50" , __lowercase=True , __lowercase=False , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=1 , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=0.1 , **__lowercase , ) -> Union[str, Any]:
        # A timm backbone and an explicit backbone_config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            # Fall back to a default ResNet backbone config, or materialize the
            # concrete config class from a plain dict.
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                lowerCAmelCase_ : List[str] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(__lowercase , __lowercase ):
                lowerCAmelCase_ : Optional[Any] = backbone_config.get('''model_type''' )
                lowerCAmelCase_ : Any = CONFIG_MAPPING[backbone_model_type]
                lowerCAmelCase_ : int = config_class.from_dict(__lowercase )
            # set timm attributes to None
            lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = None, None, None
        # Plain attribute copies of the constructor arguments follow.
        lowerCAmelCase_ : Tuple = use_timm_backbone
        lowerCAmelCase_ : str = backbone_config
        lowerCAmelCase_ : Dict = num_channels
        lowerCAmelCase_ : str = num_queries
        lowerCAmelCase_ : List[Any] = d_model
        lowerCAmelCase_ : int = encoder_ffn_dim
        lowerCAmelCase_ : Union[str, Any] = encoder_layers
        lowerCAmelCase_ : Dict = encoder_attention_heads
        lowerCAmelCase_ : str = decoder_ffn_dim
        lowerCAmelCase_ : Optional[Any] = decoder_layers
        lowerCAmelCase_ : Dict = decoder_attention_heads
        lowerCAmelCase_ : Optional[Any] = dropout
        lowerCAmelCase_ : List[Any] = attention_dropout
        lowerCAmelCase_ : Dict = activation_dropout
        lowerCAmelCase_ : Optional[Any] = activation_function
        lowerCAmelCase_ : Union[str, Any] = init_std
        lowerCAmelCase_ : Optional[Any] = init_xavier_std
        lowerCAmelCase_ : int = encoder_layerdrop
        lowerCAmelCase_ : Optional[int] = decoder_layerdrop
        lowerCAmelCase_ : Union[str, Any] = encoder_layers
        lowerCAmelCase_ : Dict = auxiliary_loss
        lowerCAmelCase_ : Optional[int] = position_embedding_type
        lowerCAmelCase_ : str = backbone
        lowerCAmelCase_ : Union[str, Any] = use_pretrained_backbone
        lowerCAmelCase_ : Union[str, Any] = dilation
        # Hungarian matcher
        lowerCAmelCase_ : int = class_cost
        lowerCAmelCase_ : Union[str, Any] = bbox_cost
        lowerCAmelCase_ : Any = giou_cost
        # Loss coefficients
        lowerCAmelCase_ : int = mask_loss_coefficient
        lowerCAmelCase_ : int = dice_loss_coefficient
        lowerCAmelCase_ : Dict = bbox_loss_coefficient
        lowerCAmelCase_ : List[str] = giou_loss_coefficient
        lowerCAmelCase_ : List[Any] = eos_coefficient
        super().__init__(is_encoder_decoder=__lowercase , **__lowercase )

    @property
    def lowercase_ ( self ) -> int:
        # Generic alias: num_attention_heads maps to the encoder's head count.
        return self.encoder_attention_heads

    @property
    def lowercase_ ( self ) -> int:
        # Generic alias: hidden_size maps to d_model.
        # NOTE(review): this def reuses the previous property's name and so
        # shadows it — an artifact of the obfuscation.
        return self.d_model


class snake_case__( UpperCAmelCase__ ):
    """ONNX export configuration: input axes, tolerance and opset support."""

    # minimum supported torch version for the export
    SCREAMING_SNAKE_CASE__ : List[Any] = version.parse("""1.11""" )

    @property
    def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis names for each model input.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ]
        )

    @property
    def lowercase_ ( self ) -> float:
        # Absolute tolerance used when validating exported model outputs.
        return 1e-5

    @property
    def lowercase_ ( self ) -> int:
        # Default ONNX opset.
        return 1_2
619
# Repo-maintenance script: verify that every config class docstring mentions
# a valid checkpoint link of the form `[name](https://huggingface.co/name)`.
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase : Any ="""src/transformers"""


# This is to make sure the transformers module imported is the one in the repo.
# NOTE(review): the obfuscation renamed the module-level constants to
# `_UpperCAmelCase`, but the code below still reads the original names
# (PATH_TO_TRANSFORMERS, transformers, CONFIG_MAPPING, _re_checkpoint,
# CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK) — those bindings
# need to be restored for the script to run.
_UpperCAmelCase : Optional[Any] =direct_transformers_import(PATH_TO_TRANSFORMERS)

_UpperCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCAmelCase : Dict =re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")


# Config classes exempted from the checkpoint-in-docstring requirement.
_UpperCAmelCase : Any ={
    """DecisionTransformerConfig""",
    """EncoderDecoderConfig""",
    """MusicgenConfig""",
    """RagConfig""",
    """SpeechEncoderDecoderConfig""",
    """TimmBackboneConfig""",
    """VisionEncoderDecoderConfig""",
    """VisionTextDualEncoderConfig""",
    """LlamaConfig""",
}


def lowerCAmelCase ( lowerCAmelCase_ )-> str:
    """Return the first checkpoint name whose markdown link in the class
    source matches `https://huggingface.co/<name>`, or None."""
    lowerCAmelCase_ : Any = None

    # source code of `config_class`
    lowerCAmelCase_ : Optional[int] = inspect.getsource(lowerCAmelCase_ )
    lowerCAmelCase_ : str = _re_checkpoint.findall(lowerCAmelCase_ )

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            lowerCAmelCase_ : Optional[Any] = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        lowerCAmelCase_ : Tuple = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            lowerCAmelCase_ : List[str] = ckpt_name
            break

    return checkpoint


def lowerCAmelCase ( )-> Optional[Any]:
    """Collect config classes with no valid checkpoint link and raise
    ValueError listing them.

    NOTE(review): this def reuses the previous function's name and so
    shadows it — an obfuscation artifact; the two originally had distinct
    names (get_checkpoint_from_config_class / check_config_docstrings_...).
    """
    lowerCAmelCase_ : Tuple = []

    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        lowerCAmelCase_ : int = get_checkpoint_from_config_class(lowerCAmelCase_ )

        lowerCAmelCase_ : Tuple = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(lowerCAmelCase_ )

    if len(lowerCAmelCase_ ) > 0:
        lowerCAmelCase_ : List[Any] = '''\n'''.join(sorted(lowerCAmelCase_ ) )
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
619
1
# Byte-pair-encoding tokenizer for the BlenderbotSmall (90M) model.
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


_UpperCAmelCase : int =logging.get_logger(__name__)

# Local filenames the tokenizer reads/writes.
_UpperCAmelCase : List[Any] ={
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

# Hosted file locations per checkpoint.
_UpperCAmelCase : Optional[int] ={
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}

# Maximum model input length per checkpoint.
_UpperCAmelCase : Union[str, Any] ={"""facebook/blenderbot_small-90M""": 512}


def lowerCAmelCase ( lowerCAmelCase_ )-> Any:
    """Return the set of adjacent symbol pairs in `word` (a symbol tuple).

    NOTE(review): the obfuscation renamed the parameter/locals, but the body
    reads `word`, `pairs` and `prev_char` — the original names must be
    restored for this to run.
    """
    lowerCAmelCase_ : Optional[Any] = set()
    lowerCAmelCase_ : List[Any] = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowerCAmelCase_ : int = char

    lowerCAmelCase_ : str = set(lowerCAmelCase_ )
    return pairs


class snake_case__( UpperCAmelCase__ ):
    """BlenderbotSmall BPE tokenizer.

    NOTE(review): every method below has the obfuscated name `lowercase_`
    and duplicated `__lowercase` parameters while the bodies read the
    original names — restore the original signatures before use.
    """

    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""input_ids""", """attention_mask"""]

    def __init__( self , __lowercase , __lowercase , __lowercase="__start__" , __lowercase="__end__" , __lowercase="__unk__" , __lowercase="__null__" , **__lowercase , ) -> Optional[Any]:
        # Load the token->id vocab (JSON) and the ordered BPE merge rules.
        super().__init__(unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , **__lowercase )
        with open(__lowercase , encoding='''utf-8''' ) as vocab_handle:
            lowerCAmelCase_ : str = json.load(__lowercase )
        lowerCAmelCase_ : Any = {v: k for k, v in self.encoder.items()}
        with open(__lowercase , encoding='''utf-8''' ) as merges_handle:
            lowerCAmelCase_ : Dict = merges_handle.read().split('''\n''' )[1:-1]
        lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in merges]
        # bpe_ranks: merge pair -> priority (lower rank merges first)
        lowerCAmelCase_ : int = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
        # cache of already-BPE'd tokens
        lowerCAmelCase_ : Optional[Any] = {}

    @property
    def lowercase_ ( self ) -> int:
        # vocab_size
        return len(self.encoder )

    def lowercase_ ( self ) -> Dict:
        # get_vocab: base vocab plus added tokens
        return dict(self.encoder , **self.added_tokens_encoder )

    def lowercase_ ( self , __lowercase ) -> str:
        """Apply BPE to one whitespace token and return the merged subwords
        joined by spaces, with '@@' marking non-final pieces."""
        if token in self.cache:
            return self.cache[token]
        # Normalize punctuation/apostrophes/whitespace before splitting.
        lowerCAmelCase_ : int = re.sub('''([.,!?()])''' , R''' \1''' , __lowercase )
        lowerCAmelCase_ : List[str] = re.sub('''(\')''' , R''' \1 ''' , __lowercase )
        lowerCAmelCase_ : Optional[int] = re.sub(R'''\s{2,}''' , ''' ''' , __lowercase )
        if "\n" in token:
            lowerCAmelCase_ : Any = token.replace('''\n''' , ''' __newln__''' )

        lowerCAmelCase_ : List[str] = token.split(''' ''' )
        lowerCAmelCase_ : List[Any] = []
        for token in tokens:
            if not len(__lowercase ):
                continue

            lowerCAmelCase_ : Union[str, Any] = token.lower()
            lowerCAmelCase_ : Optional[int] = tuple(__lowercase )
            # Mark end-of-word on the last character so merges respect word ends.
            lowerCAmelCase_ : str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            lowerCAmelCase_ : Optional[int] = get_pairs(__lowercase )

            if not pairs:
                words.append(__lowercase )
                continue

            # Repeatedly merge the lowest-ranked pair until none remain.
            while True:
                lowerCAmelCase_ : int = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                lowerCAmelCase_ , lowerCAmelCase_ : Dict = bigram
                lowerCAmelCase_ : Union[str, Any] = []
                lowerCAmelCase_ : str = 0

                while i < len(__lowercase ):
                    try:
                        lowerCAmelCase_ : Optional[Any] = word.index(__lowercase , __lowercase )
                        new_word.extend(word[i:j] )
                        lowerCAmelCase_ : str = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break

                    if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                lowerCAmelCase_ : Union[str, Any] = tuple(__lowercase )
                lowerCAmelCase_ : Union[str, Any] = new_word
                if len(__lowercase ) == 1:
                    break
                else:
                    lowerCAmelCase_ : Dict = get_pairs(__lowercase )
            lowerCAmelCase_ : Dict = '''@@ '''.join(__lowercase )
            # Strip the trailing '</w>' marker.
            lowerCAmelCase_ : Optional[Any] = word[:-4]

            lowerCAmelCase_ : Optional[int] = word
            words.append(__lowercase )
        return " ".join(__lowercase )

    def lowercase_ ( self , __lowercase ) -> List[str]:
        # _tokenize: split on whitespace, BPE each piece.
        lowerCAmelCase_ : Optional[int] = []
        lowerCAmelCase_ : List[Any] = re.findall(R'''\S+\n?''' , __lowercase )

        for token in words:
            split_tokens.extend(list(self.bpe(__lowercase ).split(''' ''' ) ) )
        return split_tokens

    def lowercase_ ( self , __lowercase ) -> int:
        # _convert_token_to_id (lowercased lookup, unk fallback)
        lowerCAmelCase_ : int = token.lower()
        return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )

    def lowercase_ ( self , __lowercase ) -> str:
        # _convert_id_to_token
        return self.decoder.get(__lowercase , self.unk_token )

    def lowercase_ ( self , __lowercase ) -> str:
        # convert_tokens_to_string: undo the '@@ ' continuation markers
        lowerCAmelCase_ : int = ''' '''.join(__lowercase ).replace('''@@ ''' , '''''' ).strip()
        return out_string

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        """save_vocabulary: write vocab.json and merges.txt to a directory."""
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : Dict = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase_ : Tuple = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )

        with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' )

        lowerCAmelCase_ : Optional[int] = 0
        with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            # NOTE(review): the sort key lambda reads `kv`, not its parameter —
            # originally `lambda kv: kv[1]`; broken by the obfuscation.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowerCAmelCase_ : Tuple = token_index
                writer.write(''' '''.join(__lowercase ) + '''\n''' )
                index += 1

        return vocab_file, merge_file
619
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")


class snake_case__:
    """Wrapper around a learning-rate scheduler that only steps it when the
    wrapped optimizer(s) actually stepped (gradient accumulation aware) and
    compensates for multi-process training by stepping multiple times.

    NOTE(review): the obfuscation duplicated the `__init__` parameter names
    (`__lowercase`) while the body reads `scheduler`, `optimizers`,
    `split_batches`, `step_with_optimizer` — restore those names before use.
    """

    def __init__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = False ) -> Tuple:
        lowerCAmelCase_ : Optional[int] = scheduler
        # Normalize to a list of optimizers.
        lowerCAmelCase_ : Dict = optimizers if isinstance(__lowercase , (list, tuple) ) else [optimizers]
        lowerCAmelCase_ : str = split_batches
        lowerCAmelCase_ : Any = step_with_optimizer
        lowerCAmelCase_ : Optional[Any] = GradientState()

    def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any:
        """Step the scheduler, skipping steps that the optimizer skipped."""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*__lowercase , **__lowercase )
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                # Keep the scheduler's internal counter in sync anyway.
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*__lowercase , **__lowercase )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            lowerCAmelCase_ : Optional[Any] = AcceleratorState().num_processes
            for _ in range(__lowercase ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*__lowercase , **__lowercase )
                else:
                    self.scheduler.step(*__lowercase , **__lowercase )

    # The remaining methods are thin pass-throughs to the wrapped scheduler.
    # NOTE(review): all were renamed to `lowercase_`, so later defs shadow the
    # earlier ones — the original names (get_last_lr, state_dict,
    # load_state_dict, get_lr, print_lr) must be restored.
    def lowercase_ ( self ) -> Union[str, Any]:
        return self.scheduler.get_last_lr()

    def lowercase_ ( self ) -> List[str]:
        return self.scheduler.state_dict()

    def lowercase_ ( self , __lowercase ) -> int:
        self.scheduler.load_state_dict(__lowercase )

    def lowercase_ ( self ) -> Tuple:
        return self.scheduler.get_lr()

    def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
        return self.scheduler.print_lr(*__lowercase , **__lowercase )
619
1
# Fast (tokenizers-backed) tokenizer for XLNet.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Slow tokenizer unavailable without sentencepiece.
    _UpperCAmelCase : Dict =None

_UpperCAmelCase : Tuple =logging.get_logger(__name__)

_UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

# Hosted vocabulary/tokenizer files per checkpoint.
_UpperCAmelCase : Any ={
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}

_UpperCAmelCase : Dict ={
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

# SentencePiece word-boundary marker.
_UpperCAmelCase : Tuple ="""▁"""

# Segments (not really needed)
_UpperCAmelCase : str =0
_UpperCAmelCase : List[str] =1
_UpperCAmelCase : int =2
_UpperCAmelCase : Any =3
_UpperCAmelCase : List[Any] =4


class snake_case__( UpperCAmelCase__ ):
    """Fast XLNet tokenizer (left-padded).

    NOTE(review): the obfuscation duplicated all `__init__` parameter names
    (`__lowercase`) while the body reads `mask_token`, `do_lower_case`,
    `remove_space`, `keep_accents`, `vocab_file` — restore the original
    signature before use.
    """

    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left.
    SCREAMING_SNAKE_CASE__ : Any = """left"""
    SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer

    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token

        super().__init__(
            vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )

        # _pad_token_type_id (XLNet uses segment id 3 for padding)
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Dict = do_lower_case
        lowerCAmelCase_ : Dict = remove_space
        lowerCAmelCase_ : List[str] = keep_accents
        lowerCAmelCase_ : List[str] = vocab_file
        # can_save_slow_tokenizer: only if the sentencepiece model is present
        lowerCAmelCase_ : str = False if not self.vocab_file else True

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """build_inputs_with_special_tokens: XLNet format `A <sep> <cls>` or
        `A <sep> B <sep> <cls>` (special tokens go at the END)."""
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        lowerCAmelCase_ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """create_token_type_ids_from_sequences: segment 0 for A, 1 for B,
        and segment 2 for the trailing <cls>."""
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [2]

        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        """save_vocabulary: copy the sentencepiece model into the directory."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )

        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )

        return (out_vocab_file,)
619
# Manim scene animating how model weights move between an empty model,
# a loaded checkpoint, and disk-backed np.memmaps.
from manim import *


class snake_case__( UpperCAmelCase__ ):
    """Animation: CPU / GPU / Model memory blocks are drawn, checkpoint
    weights are placed, then moved to Disk and the checkpoint fades out.

    NOTE(review): all locals were renamed to `lowerCAmelCase_` while the
    body reads the original names (cpu, gpu, model, checkpoint, disk, ...)
    and method arguments are `__lowercase` placeholders — the original
    bindings must be restored for this scene to render.
    """

    def lowercase_ ( self ) -> Tuple:
        # Base rectangles: memory cell, meta (disk) cell, fill template.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )

        # --- CPU block: two columns of six cells ---
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )

        # --- GPU block: four cells ---
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )

        # --- Model block: six cells ---
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )

        # Place fill targets for the model's weights over the CPU cells.
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )

        # --- Loaded checkpoint block ---
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )

        # Checkpoint fills plus their CPU-side copies.
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )

        # --- Legend (key) ---
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )

        # --- Step 1 caption + Disk block ---
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )

        # Animate checkpoint fills shrinking onto the disk cells.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )

        # --- Step 2 caption + final fade-out ---
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
619
1
def simplify(current_set: list[list]) -> list[list]:
    """One round of Gaussian elimination, applied recursively.

    Each row is divided by its leading coefficient, then subtracted from the
    first row to cancel the leading term; the remaining sub-matrix (first
    column stripped) is simplified recursively and stitched back together.

    BUG FIX: in the obfuscated original both this function and
    ``solve_simultaneous`` were named identically (the second shadowed the
    first) and the recursive call referenced an unbound name.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term -> leading 1s.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude

    # Subtract each row from the first to cancel the leading term.
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want; preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)

    # Create next recursion iteration set (strip the solved first column).
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        # Re-attach the stripped first column and first row.
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n rows of n+1 numbers
    (coefficients followed by the constant term); returns the solution list
    rounded to 5 decimal places.

    Raises:
        IndexError: if the input is empty or rows have the wrong length.
        ValueError: if a row holds non-numeric entries, or no zero-free
            pivot equation exists when one is required.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('''solve_simultaneous() requires lists of integers''')
    if len(equations) == 1:
        # Single equation a*x = b -> x = b / a.
        return [equations[0][-1] / equations[0][0]]

    data_set = equations.copy()
    if any(0 in row for row in data_set):
        # Move a zero-free ("full") equation to the front as the pivot.
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''')
        data_set.insert(0, full_row)

    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # Back-substitute from the bottom (triangular) row upward.
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            # Bottom row: single unknown remains.
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        # Drop the pivot coefficient, align with already-found solutions.
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)

    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
619
# Arabic value -> Roman symbol, largest first, including subtractive forms.
# BUG FIX: the obfuscated original bound this list to `_UpperCAmelCase` while
# int_to_roman iterated the unbound name `ROMAN`; the binding is restored.
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to its integer value.

    BUG FIX: the original file defined both conversion functions under the
    same obfuscated name, so this one was shadowed and unreachable.

    >>> roman_to_int("XLII")
    42
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller symbol before a larger one is subtractive (e.g. IV = 4).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert a positive integer to its Roman numeral representation.

    >>> int_to_roman(42)
    'XLII'
    """
    result = []
    for arabic, roman in ROMAN:
        # How many times the current symbol fits; keep the remainder.
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
1
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=3 , __lowercase=3_2 , __lowercase=3 , __lowercase=1_0 , __lowercase=[1_0, 2_0, 3_0, 4_0] , __lowercase=[1, 1, 2, 1] , __lowercase=True , __lowercase=True , __lowercase="relu" , __lowercase=3 , __lowercase=None , ) -> int: lowerCAmelCase_ : str = parent lowerCAmelCase_ : str = batch_size lowerCAmelCase_ : Dict = image_size lowerCAmelCase_ : Dict = num_channels lowerCAmelCase_ : List[str] = embeddings_size lowerCAmelCase_ : List[Any] = hidden_sizes lowerCAmelCase_ : List[str] = depths lowerCAmelCase_ : List[Any] = is_training lowerCAmelCase_ : Dict = use_labels lowerCAmelCase_ : Any = hidden_act lowerCAmelCase_ : Union[str, Any] = num_labels lowerCAmelCase_ : Dict = scope lowerCAmelCase_ : Any = len(__lowercase ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ : str = None if self.use_labels: lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase_ : str = self.get_config() return config, pixel_values, labels def lowercase_ ( self ) -> Optional[int]: return RegNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : List[str] = TFRegNetModel(config=__lowercase ) lowerCAmelCase_ : int = model(__lowercase , training=__lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Any = self.num_labels lowerCAmelCase_ : List[str] = TFRegNetForImageClassification(__lowercase ) lowerCAmelCase_ : Any = model(__lowercase , labels=__lowercase , training=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Tuple = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = config_and_inputs lowerCAmelCase_ : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : int = ( {"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Dict = False def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : Any = TFRegNetModelTester(self ) lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def lowercase_ ( 
self ) -> Tuple: return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def lowercase_ ( self ) -> str: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def lowercase_ ( self ) -> Union[str, Any]: super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def lowercase_ ( self ) -> int: pass def lowercase_ ( self ) -> int: lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = model_class(__lowercase ) lowerCAmelCase_ : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : Optional[Any] = [*signature.parameters.keys()] lowerCAmelCase_ : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def lowercase_ ( self ) -> List[Any]: def check_hidden_states_output(__lowercase , __lowercase , __lowercase ): lowerCAmelCase_ : List[Any] = model_class(__lowercase ) lowerCAmelCase_ : Any = model(**self._prepare_for_class(__lowercase , __lowercase ) , training=__lowercase ) lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase_ : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__lowercase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ : Optional[Any] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCAmelCase_ : Union[str, Any] = layer_type lowerCAmelCase_ : Tuple = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase_ : Optional[Any] = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowercase , __lowercase , __lowercase , __lowercase={} ): lowerCAmelCase_ : List[str] = model(__lowercase , return_dict=__lowercase , **__lowercase ) lowerCAmelCase_ : List[Any] = model(__lowercase , return_dict=__lowercase , **__lowercase ).to_tuple() def recursive_check(__lowercase , __lowercase ): if isinstance(__lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__lowercase , __lowercase ): recursive_check(__lowercase , __lowercase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowercase , __lowercase ) ) , msg=( '''Tuple and dict output are not equal. 
Difference:''' f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__lowercase , __lowercase ) for model_class in self.all_model_classes: lowerCAmelCase_ : Any = model_class(__lowercase ) lowerCAmelCase_ : int = self._prepare_for_class(__lowercase , __lowercase ) lowerCAmelCase_ : Union[str, Any] = self._prepare_for_class(__lowercase , __lowercase ) check_equivalence(__lowercase , __lowercase , __lowercase ) lowerCAmelCase_ : Optional[int] = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) lowerCAmelCase_ : str = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) check_equivalence(__lowercase , __lowercase , __lowercase ) lowerCAmelCase_ : List[Any] = self._prepare_for_class(__lowercase , __lowercase ) lowerCAmelCase_ : Optional[int] = self._prepare_for_class(__lowercase , __lowercase ) check_equivalence(__lowercase , __lowercase , __lowercase , {'''output_hidden_states''': True} ) lowerCAmelCase_ : int = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) lowerCAmelCase_ : Any = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) check_equivalence(__lowercase , __lowercase , __lowercase , {'''output_hidden_states''': True} ) def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) @slow def lowercase_ ( self ) -> int: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : List[Any] = TFRegNetModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowerCAmelCase ( )-> Optional[Any]: lowerCAmelCase_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class snake_case__( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ) -> Optional[Any]: return 
( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCAmelCase_ : Optional[int] = self.default_image_processor lowerCAmelCase_ : Dict = prepare_img() lowerCAmelCase_ : Optional[int] = image_processor(images=__lowercase , return_tensors='''tf''' ) # forward pass lowerCAmelCase_ : Optional[int] = model(**__lowercase , training=__lowercase ) # verify the logits lowerCAmelCase_ : Dict = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __lowercase ) lowerCAmelCase_ : Union[str, Any] = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowercase , atol=1e-4 )
619
import csv

import tweepy

# Twitter API credentials — fill in before running.
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent tweets (up to the API's ~3200 limit) into a CSV.

    Writes ``new_<screen_name>_tweets.csv`` with columns id, created_at, text.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
619
1
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class snake_case__(ProcessorMixin):
    """Bundles a CLAP feature extractor and a Roberta tokenizer into one processor.

    Text goes through the tokenizer, audio through the feature extractor; when
    both are given the audio features are merged into the text encoding.
    """

    # Names ProcessorMixin uses to instantiate the two sub-components.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Encode text and/or audio; at least one of the two must be provided.

        Returns a BatchEncoding; when both inputs are given the audio
        ``input_features`` are attached to the text encoding.
        """
        # sampling_rate is consumed here so it is not forwarded to the tokenizer.
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to RobertaTokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to RobertaTokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving and de-duplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
619
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if 'number' is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: all primes from 2 up to n (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """All primes from 2 up to n, using is_prime on each candidate."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Prime factorization of 'number' as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Largest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Smallest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """True when 'number' is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """True when 'number' is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    """First pair of primes summing to an even 'number' > 2 (Goldbach)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """The n-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """All primes strictly between the two given primes (exclusive ends)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """All positive divisors of n (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """True when 'number' equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce numerator/denominator by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """n! by iterative multiplication."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    """Fibonacci-style sequence with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
619
1
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


# Keep matmuls fully deterministic on CUDA.
# NOTE(review): the original assigned a bare False here; assuming it was the
# usual tf32 switch used by diffusers tests — confirm against upstream.
torch.backends.cuda.matmul.allow_tf32 = False


class snake_case__(unittest.TestCase):
    """Checks that one training step is identical under DDPM and DDIM schedulers."""

    def get_model_optimizer(self, resolution=32):
        """Build a small seeded UNet and an SGD optimizer for it."""
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # The last-step noisy inputs and predictions must match across schedulers.
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
619
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search of array[left:right] (right exclusive); index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted list; index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        # Small intervals fall back to a linear scan.
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of array[left..right]; index of target or -1.

    NOTE(review): the small-interval fallback scans range(left, right), which is
    exclusive of `right` — mirrors the iterative variant's original behavior.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
619
1
def solution(n: int = 1_000_000) -> int:
    """Project Euler 14: starting number below `n` with the longest Collatz chain.

    Chain lengths are memoized in `counters` (keyed by starting number) so each
    value is only walked until it reaches an already-known number.
    """
    pre_counter = 1
    largest_number = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                # Reached a number with a known chain length: reuse it.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
619
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public names it exports.
# Optional submodules are appended below only when their backend is installed.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

# Slow (sentencepiece-backed) tokenizer.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

# Fast (tokenizers-backed) tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first
    # attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
1
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    """Structural interface for a single-sample digital filter."""

    def process(self, sample: float) -> float:
        """Calculate y[n] from input sample x[n]; the protocol default is 0."""
        return 0.0


def get_bounds(
    fft_results: np.ndarray, samplerate: int
) -> tuple[int | float, int | float]:
    """Return (lowest, highest) plot bounds over the positive-frequency bins,
    clamped to at least the [-20, 20] window."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain (dB) of ``filter_type``'s impulse response on a log axis."""
    size = 512
    # Unit impulse: the filter's response to it is its impulse response.
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding to one full second
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase shift (radians) of ``filter_type``'s impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    # Unwrap so the phase curve is continuous across the +/-pi boundary.
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
619
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu _UpperCAmelCase : Any =False class snake_case__( unittest.TestCase ): '''simple docstring''' def lowercase_ ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase_ ( self ) -> Union[str, Any]: return 1_2 @property def lowercase_ ( self ) -> Any: return 1_2 @property def lowercase_ ( self ) -> Optional[Any]: return 3_2 @property def lowercase_ ( self ) -> int: torch.manual_seed(0 ) lowerCAmelCase_ : Any = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def lowercase_ ( self ) -> int: torch.manual_seed(0 ) lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(__lowercase ) @property def lowercase_ ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ : Union[str, Any] = 1_2 lowerCAmelCase_ : int = 1_2 lowerCAmelCase_ : Union[str, Any] = { '''attention_bias''': True, 
'''cross_attention_dim''': 3_2, '''attention_head_dim''': height * width, '''num_attention_heads''': 1, '''num_vector_embeds''': self.num_embed, '''num_embeds_ada_norm''': self.num_embeds_ada_norm, '''norm_num_groups''': 3_2, '''sample_size''': width, '''activation_fn''': '''geglu-approximate''', } lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase ) return model def lowercase_ ( self ) -> str: lowerCAmelCase_ : List[Any] = '''cpu''' lowerCAmelCase_ : Any = self.dummy_vqvae lowerCAmelCase_ : str = self.dummy_text_encoder lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer lowerCAmelCase_ : int = self.dummy_transformer lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed ) lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase ) lowerCAmelCase_ : Dict = VQDiffusionPipeline( vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , ) lowerCAmelCase_ : int = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) lowerCAmelCase_ : Any = '''teddy bear playing in the pool''' lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' ) lowerCAmelCase_ : Union[str, Any] = output.images lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : List[Any] = pipe( [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0] lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1] lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] ) assert np.abs(image_slice.flatten() - expected_slice 
).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : Optional[Any] = '''cpu''' lowerCAmelCase_ : str = self.dummy_vqvae lowerCAmelCase_ : Dict = self.dummy_text_encoder lowerCAmelCase_ : List[Any] = self.dummy_tokenizer lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed ) lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings( learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) lowerCAmelCase_ : List[str] = VQDiffusionPipeline( vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , ) lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) lowerCAmelCase_ : Any = '''teddy bear playing in the pool''' lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' ) lowerCAmelCase_ : str = output.images lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : Union[str, Any] = pipe( [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0] lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1] lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class snake_case__( unittest.TestCase ): '''simple docstring''' def 
lowercase_ ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> int: lowerCAmelCase_ : Tuple = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' ) lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' ) lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : Optional[int] = pipeline( '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , ) lowerCAmelCase_ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - image ).max() < 2.0
619
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public names it exports.
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

# Slow (sentencepiece-backed) tokenizer.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

# Fast (tokenizers-backed) tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first
    # attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer backed by HuggingFace *tokenizers*.

    XLNet pads on the left, hence ``padding_side = "left"``.  Saving the slow
    vocabulary is only possible when the sentencepiece model file is known.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Slow-tokenizer files can only be saved when the sentencepiece model is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLNet sequence format: ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: 0 for sequence A, 1 for sequence B, 2 for the final <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory`` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
619
1
def solution(length: int = 50) -> int:
    """Project Euler 117: count the ways to tile a 1 x ``length`` row using
    unit squares and tiles of length 2, 3 and 4 (any mix, including none).

    ``ways_number[r]`` is the number of tilings of a row of length ``r``;
    it starts at 1 (the all-unit-squares tiling) and accumulates, for every
    tile length and every start offset, the tilings of the remainder.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # Everything before the tile is unit squares; everything after
                # it is a smaller, already-counted row.
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
619
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder for ``input_1 + input_2 + carry_in``.

    Each input must be 0, 1, or 2 (2 puts the qubit into superposition via a
    Hadamard gate).  Returns the measurement counts over 1000 shots of the
    two output bits (sum, carry-out).
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
619
1
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _UpperCAmelCase : Any =16 _UpperCAmelCase : Optional[Any] =32 def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ = 16 )-> str: lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with 
accelerator.main_process_first(): lowerCAmelCase_ : Optional[int] = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase_ : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase_ : Dict = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase_ : Any = 8 else: lowerCAmelCase_ : List[str] = None return tokenizer.pad( lowerCAmelCase_ , padding='''longest''' , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCAmelCase_ : int = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) lowerCAmelCase_ : int = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _UpperCAmelCase : List[str] =mocked_dataloaders # noqa: F811 def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowerCAmelCase_ ) == "1": lowerCAmelCase_ : Any = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: lowerCAmelCase_ : List[Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: lowerCAmelCase_ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ : Tuple = config['''lr'''] lowerCAmelCase_ : List[Any] = int(config['''num_epochs'''] ) lowerCAmelCase_ : List[str] = int(config['''seed'''] ) lowerCAmelCase_ : List[Any] = int(config['''batch_size'''] ) set_seed(lowerCAmelCase_ ) lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Optional[int] = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation lowerCAmelCase_ : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCAmelCase_ : Dict = 
batch_size // MAX_GPU_BATCH_SIZE lowerCAmelCase_ : str = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase_ : Dict = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase_ : List[str] = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) # Instantiate scheduler lowerCAmelCase_ : Tuple = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: lowerCAmelCase_ : List[str] = os.path.split(lowerCAmelCase_ )[-1].split('''.''' )[0] accelerator.init_trackers(lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: lowerCAmelCase_ : str = 0 for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCAmelCase_ : List[Any] = model(**lowerCAmelCase_ ) lowerCAmelCase_ : Union[str, Any] = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() lowerCAmelCase_ : Optional[int] = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**lowerCAmelCase_ ) lowerCAmelCase_ : Union[str, Any] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) lowerCAmelCase_ : int = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase_ ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(lowerCAmelCase_ ), '''epoch''': epoch, } , step=lowerCAmelCase_ , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowerCAmelCase ( )-> Dict: lowerCAmelCase_ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=lowerCAmelCase_ , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) lowerCAmelCase_ : Dict = parser.parse_args() lowerCAmelCase_ : List[str] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
619
import re


def indian_phone_validator(phone: str) -> bool:
    """Return True if ``phone`` is a valid Indian mobile number.

    Accepts an optional ``+91`` country code (with optional dash/space), an
    optional leading ``0`` or ``91``, followed by a 10-digit number starting
    with 7, 8 or 9.
    """
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    # search + anchored pattern: match.string == phone guarantees a full match.
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
619
1
import pytest

# Name under which the dummy dataset loading script is registered.
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

# Minimal datasets loading script (WikiANN-style NER data) written to disk by
# the fixture below.
DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    """Name of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    """Source code of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    """Write the dummy loading script under ``tmp_path/datasets/<name>/<name>.py``
    and return the script directory path as a string."""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
619
# Image processor class (ConvNeXt/ViT-style): resize with a `crop_pct`
# shortest-edge strategy (resize-then-center-crop below 384, plain warp at
# 384 or larger), plus rescale and normalize steps, ending in a
# `preprocess`-style method that returns a BatchFeature of pixel_values.
# NOTE(review): the identifier mangling is destructive here — every
# parameter of every method is named `__lowercase` (duplicate parameter
# names are a SyntaxError as written) while the bodies still reference the
# original parameter names (size, do_resize, crop_pct, resample, ...), and
# the base class `UpperCAmelCase__` is undefined in this chunk.  The code is
# kept byte-identical; reconstructing the signatures safely requires the
# original file.
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _UpperCAmelCase : Any =logging.get_logger(__name__) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""] def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None: super().__init__(**__lowercase ) lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4} lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : List[Any] = do_resize lowerCAmelCase_ : Optional[int] = size # Default value set here for backwards compatibility where the value in config is None lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 lowerCAmelCase_ : Tuple = resample lowerCAmelCase_ : Optional[int] = do_rescale lowerCAmelCase_ : Any = rescale_factor lowerCAmelCase_ : List[str] = do_normalize lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray: lowerCAmelCase_ : Optional[Any] = 
get_size_dict(__lowercase , default_to_square=__lowercase ) if "shortest_edge" not in size: raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" ) lowerCAmelCase_ : Optional[int] = size['''shortest_edge'''] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct ) lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any: return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray: return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image: lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct lowerCAmelCase_ : str = resample if resample is not None 
else self.resample lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std lowerCAmelCase_ : int = size if size is not None else self.size lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images] if do_resize: lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images] if do_rescale: lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images] if do_normalize: lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images] lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] lowerCAmelCase_ : Dict = {'''pixel_values''': images} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
619
1
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer."""
    total = 0
    while num > 0:
        total += num % 10
        num //= 10
    return total


def lowerCAmelCase(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th
    convergent of the continued fraction of e.

    e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] — every third partial quotient
    (i = 3, 6, 9, ...) equals 2 * i / 3; all the others equal 1.

    Args:
        max_n: Index of the convergent whose numerator is examined.

    Returns:
        The sum of the digits of that convergent's numerator.

    NOTE(review): in the chunk as found, the digit-sum helper and this
    function shared one name (the second ``def`` shadowed the first) while
    the bodies called the undefined originals (`sum_digits`, `solution`);
    restored here with distinct names plus a backwards-compatible alias.
    """
    # Numerator recurrence h(i) = a_i * h(i-1) + h(i-2), seeded with
    # h(0) = 1 and h(1) = 2 (the leading partial quotient of e).
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


# Public alias matching the demo call below.
solution = lowerCAmelCase

if __name__ == "__main__":
    print(f"""{solution() = }""")
619
from ...configuration_utils import PretrainedConfig
from ...utils import logging

_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}


class snake_case__(PretrainedConfig):
    """Configuration for a GPT-NeoX-Japanese model.

    Holds the hyper-parameters consumed by the model (vocabulary size,
    hidden sizes, rotary-embedding settings, dropout probabilities, ...).
    Instantiating with no arguments yields the defaults of the
    abeja/gpt-neox-japanese-2.7b checkpoint.
    """

    # `model_type` in the upstream source; the mangled attribute name is
    # kept unchanged so any external references keep working.
    SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ) -> None:
        """Build the config and forward token ids / extra kwargs to the base.

        NOTE(review): the chunk as found named every parameter
        ``__lowercase`` (duplicate parameter names — a SyntaxError) and
        inherited from the undefined ``UpperCAmelCase__``.  The canonical
        parameter names were restored from the intact right-hand sides of
        the attribute assignments below, and the base class from the
        module's own ``PretrainedConfig`` import.
        """
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
619
1
# SentencePiece tokenizer class (Pegasus-style): reserves offset slots for
# <mask_1>/<mask_2>/<unk_N> special tokens ahead of the sentencepiece vocab,
# maps token<->id through that offset, and supports vocab serialization.
# NOTE(review): the identifier mangling is destructive — the first two class
# attributes both rebind `SCREAMING_SNAKE_CASE__` (so VOCAB_FILES_NAMES
# shadows itself and the original `vocab_files_names` /
# `pretrained_vocab_files_map` / `max_model_input_sizes` names are lost),
# and several method signatures repeat the parameter name `__lowercase`
# (duplicate parameter names are a SyntaxError as written) while bodies
# still reference original names (additional_special_tokens, token_ids_a,
# save_directory, ...).  Code kept byte-identical; a safe reconstruction
# requires the original file.
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Tuple ="""▁""" _UpperCAmelCase : List[str] ={"""vocab_file""": """spiece.model"""} _UpperCAmelCase : Union[str, Any] ={ """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""} } _UpperCAmelCase : Any ={ """google/pegasus-xsum""": 512, } _UpperCAmelCase : int =logging.get_logger(__name__) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Any = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : List[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , __lowercase , __lowercase="<pad>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<mask_2>" , __lowercase="<mask_1>" , __lowercase=None , __lowercase=1_0_3 , __lowercase = None , **__lowercase , ) -> None: lowerCAmelCase_ : Tuple = offset if additional_special_tokens is not None: if not isinstance(__lowercase , __lowercase ): raise TypeError( f"""additional_special_tokens should be of type {type(__lowercase )}, but is""" f""" {type(__lowercase )}""" ) lowerCAmelCase_ : Union[str, Any] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(__lowercase ) , self.offset - 1 ) ] if len(set(__lowercase ) ) != len(__lowercase ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an 
incorrectly''' f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" ) lowerCAmelCase_ : Optional[Any] = additional_special_tokens_extended else: lowerCAmelCase_ : str = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] lowerCAmelCase_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowercase , unk_token=__lowercase , mask_token=__lowercase , pad_token=__lowercase , mask_token_sent=__lowercase , offset=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , ) lowerCAmelCase_ : List[Any] = mask_token_sent lowerCAmelCase_ : Union[str, Any] = vocab_file lowerCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowercase ) # add special tokens to encoder dict lowerCAmelCase_ : Dict[int, str] = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) lowerCAmelCase_ : Dict[str, int] = {v: k for k, v in self.encoder.items()} @property def lowercase_ ( self ) -> int: return len(self.sp_model ) + self.offset def lowercase_ ( self ) -> Dict[str, int]: lowerCAmelCase_ : Any = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> str: lowerCAmelCase_ : str = self.__dict__.copy() lowerCAmelCase_ : Optional[int] = None return state def __setstate__( self , __lowercase ) -> List[str]: lowerCAmelCase_ : Optional[Any] = d # for backward compatibility if 
not hasattr(self , '''sp_model_kwargs''' ): lowerCAmelCase_ : int = {} lowerCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase_ ( self , __lowercase ) -> List[str]: return self.sp_model.encode(__lowercase , out_type=__lowercase ) def lowercase_ ( self , __lowercase ) -> int: if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] lowerCAmelCase_ : Tuple = self.sp_model.piece_to_id(__lowercase ) return sp_id + self.offset def lowercase_ ( self , __lowercase ) -> str: if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: lowerCAmelCase_ : int = self.sp_model.IdToPiece(index - self.offset ) return token def lowercase_ ( self , __lowercase ) -> Optional[Any]: lowerCAmelCase_ : Union[str, Any] = [] lowerCAmelCase_ : Union[str, Any] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowercase ) + token lowerCAmelCase_ : Tuple = [] else: current_sub_tokens.append(__lowercase ) out_string += self.sp_model.decode(__lowercase ) return out_string.strip() def lowercase_ ( self , __lowercase=False ) -> List[Any]: return 1 def lowercase_ ( self , __lowercase ) -> Any: lowerCAmelCase_ : Dict = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]: if already_has_special_tokens: return self._special_token_mask(__lowercase ) elif token_ids_a is None: return self._special_token_mask(__lowercase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowercase_ ( 
self , __lowercase , __lowercase=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]: if not os.path.isdir(__lowercase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase_ : Optional[Any] = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowercase ) elif not os.path.isfile(self.vocab_file ): with open(__lowercase , '''wb''' ) as fi: lowerCAmelCase_ : int = self.sp_model.serialized_model_proto() fi.write(__lowercase ) return (out_vocab_file,)
619
# MaskFormer test suite: a model-tester helper class (builds configs and
# random pixel/mask/class inputs and checks output shapes), a common
# ModelTesterMixin test class, and slow vision integration tests that pin
# expected logits for pretrained checkpoints.
# NOTE(review): the identifier mangling is destructive here as well — helper
# `__init__`/check methods repeat the parameter name `__lowercase`
# (duplicate parameter names are a SyntaxError as written), bodies reference
# original argument names that no longer exist, and tuple unpacking targets
# all share the name `lowerCAmelCase_`.  The test logic is long and
# order-dependent (gradient retention, config plumbing, exact tensor
# expectations), so the code is kept byte-identical; a safe reconstruction
# requires the original file.
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]: lowerCAmelCase_ : str = parent lowerCAmelCase_ : Optional[Any] = batch_size lowerCAmelCase_ : List[Any] = is_training lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss lowerCAmelCase_ : List[Any] = num_queries lowerCAmelCase_ : str = num_channels lowerCAmelCase_ : Dict = min_size lowerCAmelCase_ : List[str] = max_size lowerCAmelCase_ : Any = num_labels lowerCAmelCase_ : str = mask_feature_size def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowercase ) lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase ) lowerCAmelCase_ : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5 ).float() lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long() 
lowerCAmelCase_ : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> List[str]: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs() lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def lowercase_ ( self , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int: with torch.no_grad(): lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase ) model.to(__lowercase ) model.eval() lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, 
self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowercase , __lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase ) model.to(__lowercase ) model.eval() def comm_check_on_output(__lowercase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Any = model(__lowercase ) comm_check_on_output(__lowercase ) lowerCAmelCase_ : List[Any] = model( pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) comm_check_on_output(__lowercase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Tuple 
= ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = False def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Any = MaskFormerModelTester(self ) lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def lowercase_ ( self ) -> Any: self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def lowercase_ ( self ) -> str: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def lowercase_ ( self ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase_ ( self ) -> Dict: pass def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: lowerCAmelCase_ : Tuple = model_class(__lowercase ) lowerCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : str = [*signature.parameters.keys()] lowerCAmelCase_ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) @slow def lowercase_ ( self ) -> Optional[int]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2 lowerCAmelCase_ : List[Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(), } lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase ) lowerCAmelCase_ : Dict = model(**__lowercase ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> int: lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase ) lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> List[str]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : int = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ 
, lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Optional[Any] = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss loss.backward() def lowercase_ ( self ) -> Optional[int]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : Any = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Any = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowercase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _UpperCAmelCase : Dict =1E-4 def lowerCAmelCase ( )-> Any: lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class snake_case__( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ) -> Union[str, Any]: return ( 
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase ) lowerCAmelCase_ : Dict = self.default_image_processor lowerCAmelCase_ : int = prepare_img() lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**__lowercase ) lowerCAmelCase_ : Union[str, Any] = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : List[Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : int = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : Tuple = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' 
).to(__lowercase ) lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : Dict = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Tuple = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : Dict = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : int = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size 
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : List[str] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : int = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Dict = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : str = self.default_image_processor lowerCAmelCase_ : Union[str, Any] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase ) lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']] lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']] with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) self.assertTrue(outputs.loss is not None )
619
1
import argparse
import hashlib  # hashlib is only used inside the self-test against the reference
import struct


class SHA1Hash:
    """Pure-Python SHA-1 (RFC 3174 / FIPS 180-1) over a ``bytes`` message.

    NOTE(review): the dump named this class ``snake_case__`` while the
    module-level code called ``SHAaHash``, and its internals assigned to
    throwaway locals instead of ``self.data`` / ``self.h`` /
    ``self.padded_data`` / ``self.blocks``.  Restored to a working state;
    compatibility aliases for both old names are kept below.
    """

    def __init__(self, data):
        # Message to hash (bytes) and the five 32-bit initial hash words.
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer ``n`` by ``b`` bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Return the message padded to a multiple of 64 bytes.

        Appends 0x80, zero bytes, then the 64-bit big-endian *bit* length,
        per the SHA-1 specification.
        """
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        return self.data + padding + struct.pack(">Q", 8 * len(self.data))

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64]
            for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand one 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block; return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round-dependent boolean function f and constant k.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                else:  # 60 <= i < 80
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


# Backwards-compatible aliases for the names used elsewhere in this dump.
SHAaHash = SHA1Hash
snake_case__ = SHA1Hash


def test_sha1_hash():
    """Self-test against hashlib's reference SHA-1 implementation."""
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    """CLI entry point: hash a literal string or the contents of a file."""
    parser = argparse.ArgumentParser(description='''Process some strings or files''')
    parser.add_argument(
        '''--string''',
        dest='''input_string''',
        default='''Hello World!! Welcome to Cryptography''',
        help='''Hash the string''',
    )
    parser.add_argument('''--file''', dest='''input_file''', help='''Hash contents of a file''')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring.
    if args.input_file:
        with open(args.input_file, '''rb''') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, '''utf-8''')
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
619
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCAmelCase__ ) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} ) SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} ) SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} ) SCREAMING_SNAKE_CASE__ : str = "audio" SCREAMING_SNAKE_CASE__ : str = "transcription" def lowercase_ ( self , __lowercase ) -> int: if self.audio_column not in features: raise ValueError(f"""Column {self.audio_column} is not present in features.""" ) if not isinstance(features[self.audio_column] , __lowercase ): raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" ) lowerCAmelCase_ : List[str] = copy.deepcopy(self ) lowerCAmelCase_ : Optional[Any] = self.input_schema.copy() lowerCAmelCase_ : Optional[Any] = features[self.audio_column] lowerCAmelCase_ : List[str] = input_schema return task_template @property def lowercase_ ( self ) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
1
import csv import tweepy # Twitter API credentials _UpperCAmelCase : int ="""""" _UpperCAmelCase : Optional[int] ="""""" _UpperCAmelCase : Dict ="""""" _UpperCAmelCase : str ="""""" def lowerCAmelCase ( lowerCAmelCase_ )-> None: # authorize twitter, initialize tweepy lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ ) auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ ) # initialize a list to hold all the tweepy Tweets lowerCAmelCase_ : Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # save the id of the oldest tweet less one lowerCAmelCase_ : str = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowerCAmelCase_ ) > 0: print(f"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates lowerCAmelCase_ : Optional[Any] = api.user_timeline( screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # update the id of the oldest tweet less one lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1 print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(lowerCAmelCase_ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
619
# Argument-name sets for the shared diffusers pipeline tests: each frozenset
# lists either the full set of call parameters a pipeline family accepts, or
# the required/batchable subset.
#
# NOTE(review): the dump's obfuscation renamed every constant to
# ``_UpperCAmelCase``, so each assignment below clobbers the previous one and
# only the final binding survives at runtime.  The original distinct names
# (e.g. TEXT_TO_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, ...) cannot be
# recovered from this file alone — confirm against the upstream source.

# Text-to-image: full parameter set, then the prompt-only batch subset.
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
# Unconditional generation: no extra call parameters.
_UpperCAmelCase : Dict =frozenset([])
# Image-variation pipelines.
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
# Text-guided image variation (img2img).
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
# Example-guided inpainting (PaintByExample-style).
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# Class-conditional image generation.
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
# Unconditional generation parameterized by batch size only.
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# Text-to-audio pipelines.
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
# Token-conditioned audio generation.
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
619
1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> bool: return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
619
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int: lowerCAmelCase_ : Dict = 1 lowerCAmelCase_ : List[Any] = 1 lowerCAmelCase_ : Optional[Any] = {1: 1} for inputa in range(2 , lowerCAmelCase_ ): lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : Dict = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowerCAmelCase_ : Any = (3 * number) + 1 counter += 1 if inputa not in counters: lowerCAmelCase_ : Tuple = counter if counter > pre_counter: lowerCAmelCase_ : Optional[int] = inputa lowerCAmelCase_ : Union[str, Any] = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
619
1
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _UpperCAmelCase : Optional[int] =getLogger(__name__) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 8 , lowerCAmelCase_ = 1_024 , lowerCAmelCase_="val" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_="summarization" , lowerCAmelCase_=None , lowerCAmelCase_=1 , lowerCAmelCase_ = None , lowerCAmelCase_="" , **lowerCAmelCase_ , )-> Dict: lowerCAmelCase_ : List[str] = str(lowerCAmelCase_ ) assert local_rank is not None torch.distributed.init_process_group(backend='''nccl''' , rank=lowerCAmelCase_ ) lowerCAmelCase_ : Any = Path(lowerCAmelCase_ ) lowerCAmelCase_ : int = save_dir.joinpath(f"""rank_{local_rank}_output.json""" ) torch.cuda.set_device(lowerCAmelCase_ ) lowerCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ).cuda() if fpaa: lowerCAmelCase_ : List[str] = model.half() # determine if we need to increase num_beams use_task_specific_params(lowerCAmelCase_ , lowerCAmelCase_ ) # update config with task specific params lowerCAmelCase_ : str = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: lowerCAmelCase_ : Optional[Any] = num_return_sequences lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ ) logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. 
if max_source_length is None: lowerCAmelCase_ : List[str] = tokenizer.model_max_length if prefix is None: lowerCAmelCase_ : List[str] = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' lowerCAmelCase_ : Any = SeqaSeqDataset( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , max_target_length=1_024 , type_path=lowerCAmelCase_ , n_obs=lowerCAmelCase_ , prefix=lowerCAmelCase_ , **lowerCAmelCase_ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. lowerCAmelCase_ : int = ds.make_sortish_sampler(lowerCAmelCase_ , distributed=lowerCAmelCase_ , add_extra_examples=lowerCAmelCase_ , shuffle=lowerCAmelCase_ ) lowerCAmelCase_ : Union[str, Any] = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , collate_fn=ds.collate_fn ) lowerCAmelCase_ : Union[str, Any] = [] for batch in tqdm(lowerCAmelCase_ ): lowerCAmelCase_ : Optional[int] = model.generate( input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=lowerCAmelCase_ , num_beams=lowerCAmelCase_ , **lowerCAmelCase_ , ) lowerCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) lowerCAmelCase_ : Dict = batch['''ids'''] if num_return_sequences > 1: lowerCAmelCase_ : int = chunks(lowerCAmelCase_ , lowerCAmelCase_ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(lowerCAmelCase_ ): results.append({'''pred''': pred, '''id''': ids[i].item()} ) save_json(lowerCAmelCase_ , lowerCAmelCase_ ) return results, sampler.num_replicas def lowerCAmelCase ( )-> Optional[int]: lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser( epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' ) parser.add_argument('''--data_dir''' , 
type=lowerCAmelCase_ , help='''like cnn_dm/test.source''' ) parser.add_argument( '''--model_name''' , type=lowerCAmelCase_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , ) parser.add_argument('''--save_dir''' , type=lowerCAmelCase_ , help='''where to save''' , default='''tmp_gen''' ) parser.add_argument('''--max_source_length''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ ) parser.add_argument( '''--type_path''' , type=lowerCAmelCase_ , default='''test''' , help='''which subset to evaluate typically train/val/test''' ) parser.add_argument('''--task''' , type=lowerCAmelCase_ , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=lowerCAmelCase_ , default=8 , required=lowerCAmelCase_ , help='''batch size''' ) parser.add_argument( '''--local_rank''' , type=lowerCAmelCase_ , default=-1 , required=lowerCAmelCase_ , help='''should be passed by distributed.launch''' ) parser.add_argument( '''--n_obs''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''How many observations. 
Defaults to all.''' ) parser.add_argument( '''--num_return_sequences''' , type=lowerCAmelCase_ , default=1 , required=lowerCAmelCase_ , help='''How many sequences to return''' ) parser.add_argument( '''--sync_timeout''' , type=lowerCAmelCase_ , default=600 , required=lowerCAmelCase_ , help='''How long should master process wait for other processes to finish.''' , ) parser.add_argument('''--src_lang''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , required=lowerCAmelCase_ ) parser.add_argument('''--tgt_lang''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , required=lowerCAmelCase_ ) parser.add_argument( '''--prefix''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , default=lowerCAmelCase_ , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--debug''' , action='''store_true''' ) lowerCAmelCase_ : List[str] = time.time() lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = parser.parse_known_args() lowerCAmelCase_ : Tuple = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase_ ) if generate_kwargs and args.local_rank <= 0: print(f"""parsed the following generate kwargs: {generate_kwargs}""" ) lowerCAmelCase_ : Dict = Path(args.save_dir + '''_tmp''' ) Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) # this handles locking. lowerCAmelCase_ : List[Any] = list(json_save_dir.glob('''rank_*.json''' ) ) if intermediate_files: raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
lowerCAmelCase_ : List[str] = {} if args.src_lang is not None: lowerCAmelCase_ : List[Any] = args.src_lang if args.tgt_lang is not None: lowerCAmelCase_ : Optional[int] = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase_ ) lowerCAmelCase_ , lowerCAmelCase_ : List[str] = eval_data_dir( args.data_dir , lowerCAmelCase_ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase_ , **lowerCAmelCase_ , ) if args.local_rank <= 0: lowerCAmelCase_ : str = Path(args.save_dir ) save_dir.mkdir(exist_ok=lowerCAmelCase_ ) lowerCAmelCase_ : List[Any] = gather_results_from_each_node(lowerCAmelCase_ , lowerCAmelCase_ , args.sync_timeout ) lowerCAmelCase_ : int = combine_partial_results(lowerCAmelCase_ ) if args.num_return_sequences > 1: lowerCAmelCase_ : Union[str, Any] = save_dir.joinpath('''pseudolabel_results.json''' ) print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" ) save_json(lowerCAmelCase_ , lowerCAmelCase_ ) return lowerCAmelCase_ : List[str] = Path(args.data_dir ).joinpath(args.type_path + '''.target''' ) with open(lowerCAmelCase_ ) as f: lowerCAmelCase_ : int = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase_ )] # Calculate metrics, save metrics, and save _generations.txt lowerCAmelCase_ : int = '''translation''' in args.task lowerCAmelCase_ : Any = calculate_bleu if calc_bleu else calculate_rouge lowerCAmelCase_ : int = '''bleu''' if calc_bleu else '''rouge''' lowerCAmelCase_ : Dict = score_fn(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : str = len(lowerCAmelCase_ ) lowerCAmelCase_ : Tuple = time.time() - start_time lowerCAmelCase_ : Tuple = round(runtime / metrics['''n_obs'''] , 4 ) lowerCAmelCase_ : Dict = num_replicas # TODO(@stas00): add whatever metadata to metrics 
lowerCAmelCase_ : Any = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" ) save_json(lowerCAmelCase_ , lowerCAmelCase_ , indent=lowerCAmelCase_ ) print(lowerCAmelCase_ ) write_txt_file(lowerCAmelCase_ , save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) ) if args.debug: write_txt_file(lowerCAmelCase_ , save_dir.joinpath(f"""{args.type_path}.target""" ) ) else: shutil.rmtree(lowerCAmelCase_ ) def lowerCAmelCase ( lowerCAmelCase_ )-> List: lowerCAmelCase_ : Union[str, Any] = [] for partial_result in partial_results: records.extend(lowerCAmelCase_ ) lowerCAmelCase_ : str = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x["id"] ) lowerCAmelCase_ : Any = [x['''pred'''] for x in records] return preds def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Dict[str, List]]: # WAIT FOR lots of .json files lowerCAmelCase_ : Union[str, Any] = time.time() logger.info('''waiting for all nodes to finish''' ) lowerCAmelCase_ : Any = None while (time.time() - start_wait) < timeout: lowerCAmelCase_ : Union[str, Any] = list(save_dir.glob('''rank_*.json''' ) ) if len(lowerCAmelCase_ ) < num_replicas: continue try: # make sure all json files are fully saved lowerCAmelCase_ : List[str] = lmap(lowerCAmelCase_ , lowerCAmelCase_ ) return json_data except JSONDecodeError: continue else: raise TimeoutError('''Rank 0 gave up on waiting for other processes''' ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
619
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : str =logging.get_logger(__name__) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder-decoder""" SCREAMING_SNAKE_CASE__ : str = True def __init__( self , **__lowercase ) -> Union[str, Any]: super().__init__(**__lowercase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" lowerCAmelCase_ : str = kwargs.pop('''encoder''' ) lowerCAmelCase_ : int = encoder_config.pop('''model_type''' ) lowerCAmelCase_ : Optional[Any] = kwargs.pop('''decoder''' ) lowerCAmelCase_ : Optional[Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowerCAmelCase_ : Union[str, Any] = AutoConfig.for_model(__lowercase , **__lowercase ) lowerCAmelCase_ : List[str] = AutoConfig.for_model(__lowercase , **__lowercase ) lowerCAmelCase_ : Any = True @classmethod def lowercase_ ( cls , __lowercase , __lowercase , **__lowercase ) -> PretrainedConfig: logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowerCAmelCase_ : int = True lowerCAmelCase_ : List[Any] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowercase ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ : List[str] = self.encoder.to_dict() lowerCAmelCase_ : Dict = self.decoder.to_dict() lowerCAmelCase_ : Optional[Any] = self.__class__.model_type return output
619
1
_UpperCAmelCase : str =tuple[float, float, float] _UpperCAmelCase : Any =tuple[float, float, float] def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Vectorad: lowerCAmelCase_ : Optional[Any] = end_pointa[0] - end_pointa[0] lowerCAmelCase_ : str = end_pointa[1] - end_pointa[1] lowerCAmelCase_ : List[Any] = end_pointa[2] - end_pointa[2] return (x, y, z) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Vectorad: lowerCAmelCase_ : Tuple = ab[1] * ac[2] - ab[2] * ac[1] # *i lowerCAmelCase_ : Dict = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j lowerCAmelCase_ : List[str] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> bool: return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 )-> bool: lowerCAmelCase_ : Optional[Any] = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Union[str, Any] = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
619
from __future__ import annotations from math import pi def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]: if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if inductance < 0: raise ValueError('''Inductance cannot be negative''' ) if frequency < 0: raise ValueError('''Frequency cannot be negative''' ) if reactance < 0: raise ValueError('''Inductive reactance cannot be negative''' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
619
1
class snake_case__: '''simple docstring''' def __init__( self , __lowercase ) -> None: lowerCAmelCase_ : List[Any] = len(__lowercase ) lowerCAmelCase_ : Optional[Any] = [0] * len_array if len_array > 0: lowerCAmelCase_ : Optional[int] = array[0] for i in range(1 , __lowercase ): lowerCAmelCase_ : Tuple = self.prefix_sum[i - 1] + array[i] def lowercase_ ( self , __lowercase , __lowercase ) -> int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowercase_ ( self , __lowercase ) -> bool: lowerCAmelCase_ : List[str] = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__lowercase ) return False if __name__ == "__main__": import doctest doctest.testmod()
619
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging _UpperCAmelCase : Tuple =logging.get_logger(__name__) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = """linear""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cosine""" SCREAMING_SNAKE_CASE__ : Dict = """cosine_with_restarts""" SCREAMING_SNAKE_CASE__ : List[str] = """polynomial""" SCREAMING_SNAKE_CASE__ : Dict = """constant""" SCREAMING_SNAKE_CASE__ : List[str] = """constant_with_warmup""" SCREAMING_SNAKE_CASE__ : str = """piecewise_constant""" def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> Tuple: return LambdaLR(lowerCAmelCase_ , lambda lowerCAmelCase_ : 1 , last_epoch=lowerCAmelCase_ ) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> List[Any]: def lr_lambda(lowerCAmelCase_ ): if current_step < num_warmup_steps: return float(lowerCAmelCase_ ) / float(max(1.0 , lowerCAmelCase_ ) ) return 1.0 return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> int: lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Union[str, Any] = step_rules.split(''',''' ) for rule_str in rule_list[:-1]: lowerCAmelCase_ , lowerCAmelCase_ : str = rule_str.split(''':''' ) lowerCAmelCase_ : int = int(lowerCAmelCase_ ) lowerCAmelCase_ : str = float(lowerCAmelCase_ ) lowerCAmelCase_ : List[Any] = value lowerCAmelCase_ : int = float(rule_list[-1] ) def create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ ): def rule_func(lowerCAmelCase_ ) -> float: lowerCAmelCase_ : Tuple = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(lowerCAmelCase_ ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func lowerCAmelCase_ : Tuple = create_rules_function(lowerCAmelCase_ 
, lowerCAmelCase_ ) return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=-1 )-> Optional[int]: def lr_lambda(lowerCAmelCase_ ): if current_step < num_warmup_steps: return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.5 , lowerCAmelCase_ = -1 )-> List[Any]: def lr_lambda(lowerCAmelCase_ ): if current_step < num_warmup_steps: return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) ) lowerCAmelCase_ : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase_ ) * 2.0 * progress )) ) return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = -1 )-> Dict: def lr_lambda(lowerCAmelCase_ ): if current_step < num_warmup_steps: return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) ) lowerCAmelCase_ : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase_ ) * progress) % 1.0) )) ) return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1e-7 , lowerCAmelCase_=1.0 , lowerCAmelCase_=-1 )-> Any: lowerCAmelCase_ : Dict = optimizer.defaults['''lr'''] if not (lr_init > lr_end): raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr 
({lr_init})""" ) def lr_lambda(lowerCAmelCase_ ): if current_step < num_warmup_steps: return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lowerCAmelCase_ : List[Any] = lr_init - lr_end lowerCAmelCase_ : Optional[Any] = num_training_steps - num_warmup_steps lowerCAmelCase_ : Any = 1 - (current_step - num_warmup_steps) / decay_steps lowerCAmelCase_ : List[Any] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _UpperCAmelCase : Union[str, Any] ={ SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = -1 , )-> Optional[int]: lowerCAmelCase_ : Union[str, Any] = SchedulerType(lowerCAmelCase_ ) lowerCAmelCase_ : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(lowerCAmelCase_ , step_rules=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(lowerCAmelCase_ 
, num_warmup_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , num_cycles=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , power=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , ) return schedule_func( lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
619
1
_UpperCAmelCase : dict[str, float] ={ "joule": 1.0, "kilojoule": 1000, "megajoule": 100_0000, "gigajoule": 10_0000_0000, "wattsecond": 1.0, "watthour": 3600, "kilowatthour": 360_0000, "newtonmeter": 1.0, "calorie_nutr": 4186.8, "kilocalorie_nutr": 418_6800.00, "electronvolt": 1.6_0217_6634E-19, "britishthermalunit_it": 1055.0_5585, "footpound": 1.35_5818, } def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> float: if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: lowerCAmelCase_ : int = ( f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" f"""Valid values are: {", ".join(lowerCAmelCase_ )}""" ) raise ValueError(lowerCAmelCase_ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
619
from __future__ import annotations


def lowerCAmelCase(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Solve the mass-action law n_i**2 = n * p for the one missing quantity.

    Exactly one of the three concentrations must be given as ``0`` (the
    unknown); the function returns a ``(name, value)`` tuple for it.
    (Parameter names restored from the body — the obfuscated signature
    declared three identical parameter names, a SyntaxError.)

    Raises:
        ValueError: if not exactly one argument is ``0``, or if any given
            concentration is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor"
        )
    elif electron_conc == 0:
        # n = n_i**2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i**2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable given the count(0) guard above; kept for parity.
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class snake_case__(PipelineTool):
    """Tool that produces a binary segmentation mask of an image for a text label.

    Fixes vs. the collapsed source: the base class was an undefined name
    (`PipelineTool` is imported but was never used), all six class attributes
    shared one name (only the last survived), the three pipeline-stage methods
    shared one name, and `decode` used the nonexistent `np.uinta`.
    """

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        # Pillow is required to build the output image below.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        """Run the CLIPSeg processor on the (image, label) pair and return pt tensors."""
        # The collapsed source passed an undefined name as `padding`; `True`
        # is the standard processor padding flag.
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Forward the encoded inputs through the model; returns raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Binarize the logits and convert them into a PIL image mask."""
        array = outputs.cpu().detach().numpy()
        # Threshold into a {0, 1} mask; the collapsed source lost the index
        # expressions on these two writes (restored per upstream behavior).
        array[array <= 0] = 0
        array[array > 0] = 1
        # Scale to 0/255 and cast to 8-bit so Image.fromarray accepts it.
        return Image.fromarray((array * 255).astype(np.uint8))
619
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that are allowed to have no checkpoint in their docstring.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
# NOTE: the collapsed source bound every constant above to `_UpperCAmelCase`
# (each overwriting the last) while the code below referenced the real names;
# names are restored from those references.


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link matches its name, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Fail if any non-deprecated, non-ignored config class lacks a valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
619
1
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


# `sagemaker` is an optional dependency; the SDK is only imported when available.
if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


# These tests launch real SageMaker training jobs; they only run when
# TEST_SAGEMAKER=True is exported in the environment.
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""", """False""")) is not True,
    reason="""Skipping test because should only be run when releasing minor transformers version""",
)
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.p3.16xlarge""",
            """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_ddp.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.p3.16xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
        },
        {
            """framework""": """tensorflow""",
            """script""": """run_tf_dist.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.p3.16xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
        },
    ]
)
class snake_case__(unittest.TestCase):
    '''simple docstring'''

    # NOTE(review): all four methods below share the name `lowercase_`, so each
    # definition shadows the previous one and only the last survives on the
    # class; several bodies also read names (`instance_count`, `estimator`,
    # `job_name`, `train_runtime`, ...) that were never bound, and `__lowercase`
    # is used where no such parameter exists. This looks like a collapsed
    # setUp / create_estimator / save_results_as_csv / test method quartet —
    # confirm against the upstream test file before relying on it.

    # setUp-style fixture: copies the training script into the test directory.
    def lowercase_(self) -> None:
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(),
                encoding='''utf-8''',
                check=__lowercase,  # NOTE(review): `__lowercase` is undefined here — presumably `check=True`
            )
        assert hasattr(self, '''env''')

    # Builds the HuggingFace Estimator for a given instance count.
    def lowercase_(self, __lowercase) -> "HuggingFace":
        # Job name encodes the instance count and whether DDP or SMDataParallel is used.
        lowerCAmelCase_ : Tuple = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings
        lowerCAmelCase_ : List[Any] = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=__lowercase,  # NOTE(review): receives the raw parameter, not the job-name string above
            instance_count=__lowercase,
            instance_type=self.instance_type,
            debugger_hook_config=__lowercase,
            hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=__lowercase,
            py_version='''py36''',
        )

    # Exports the job's CloudWatch metrics to a CSV next to the test artifacts.
    def lowercase_(self, __lowercase) -> None:
        # NOTE(review): the f-string reads `job_name`, but the parameter is `__lowercase`.
        TrainingJobAnalytics(__lowercase).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")

    # End-to-end test: trains on SageMaker, checks KPIs, dumps results as JSON.
    @parameterized.expand([(2,)])
    def lowercase_(self, __lowercase) -> None:
        # create estimator
        lowerCAmelCase_ : int = self.create_estimator(__lowercase)

        # run training
        estimator.fit()

        # result dataframe
        lowerCAmelCase_ : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        lowerCAmelCase_ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
        lowerCAmelCase_ : str = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        lowerCAmelCase_ : str = (
            Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''', 9_9_9_9_9_9)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""", '''w''') as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss}, __lowercase)
619
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class snake_case__:
    """Wrapper that steps a learning-rate scheduler in sync with its optimizers.

    The scheduler is only stepped when gradients were actually synchronized and
    no wrapped optimizer skipped its step (e.g. due to gradient overflow with
    mixed precision). Without ``split_batches``, the scheduler is stepped once
    per process to keep the schedule consistent with the enlarged global batch.

    Fixes vs. the collapsed source: ``__init__`` declared four parameters with
    one shared name (a SyntaxError) while its body read the real names, and all
    delegating methods shared the name ``lowercase_`` (so only the last
    survived); names are restored from the delegated ``self.scheduler`` calls.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        # Normalize to a list so `step` can always iterate.
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        """Step the wrapped scheduler, respecting gradient accumulation and skipped optimizer steps."""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                # Keep the internal step count in sync even when we do not step.
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough to the wrapped scheduler (standard LRScheduler interface).
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
619
1
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


# Path to the SentencePiece test fixture used to build tokenizers in this file.
_UpperCAmelCase: str = get_tests_dir("""fixtures/spiece.model""")


@require_sentencepiece
@require_tokenizers
class snake_case__(UpperCAmelCase__, unittest.TestCase):
    '''simple docstring'''

    # NOTE(review): the first base class `UpperCAmelCase__` is undefined —
    # presumably it should be the imported (and otherwise unused)
    # `TokenizerTesterMixin`; confirm against the upstream test file.
    # NOTE(review): every method below is named `lowercase_`, so each
    # definition shadows the previous one; several bodies read names that were
    # never bound (`tokenizer`, `vocab_keys`, `text`, ...) or the undefined
    # `__lowercase` — collapsed from the original per-test method names.

    # Tokenizer classes / feature flags consumed by the tester mixin.
    SCREAMING_SNAKE_CASE__ : type = AlbertTokenizer
    SCREAMING_SNAKE_CASE__ : type = AlbertTokenizerFast
    SCREAMING_SNAKE_CASE__ : bool = True
    SCREAMING_SNAKE_CASE__ : bool = True
    SCREAMING_SNAKE_CASE__ : bool = True

    # setUp-style fixture: builds a tokenizer from the fixture and saves it.
    def lowercase_(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        # NOTE(review): `__lowercase` is undefined — presumably the module-level
        # fixture path above.
        lowerCAmelCase_ : Union[str, Any] = AlbertTokenizer(__lowercase)
        tokenizer.save_pretrained(self.tmpdirname)

    # Returns an (input, expected output) text pair for round-trip checks.
    def lowercase_(self, __lowercase) -> tuple:
        lowerCAmelCase_ : Union[str, Any] = '''this is a test'''
        lowerCAmelCase_ : Tuple = '''this is a test'''
        return input_text, output_text

    # Checks token <-> id conversion for the padding token.
    def lowercase_(self) -> None:
        lowerCAmelCase_ : Dict = '''<pad>'''
        lowerCAmelCase_ : Union[str, Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase), __lowercase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase), __lowercase)

    # Checks fixed positions and size of the vocabulary.
    def lowercase_(self) -> None:
        lowerCAmelCase_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<pad>''')
        self.assertEqual(vocab_keys[1], '''<unk>''')
        self.assertEqual(vocab_keys[-1], '''▁eloquent''')
        self.assertEqual(len(__lowercase), 3_0_0_0_0)

    # Checks the reported vocab size.
    def lowercase_(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0_0_0_0)

    # Compares slow (Python) and fast (Rust) tokenizers on the same input.
    def lowercase_(self) -> None:
        if not self.test_rust_tokenizer:
            return

        lowerCAmelCase_ : int = self.get_tokenizer()
        lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()

        lowerCAmelCase_ : int = '''I was born in 92000, and this is falsé.'''

        lowerCAmelCase_ : int = tokenizer.tokenize(__lowercase)
        lowerCAmelCase_ : str = rust_tokenizer.tokenize(__lowercase)
        self.assertListEqual(__lowercase, __lowercase)

        lowerCAmelCase_ : str = tokenizer.encode(__lowercase, add_special_tokens=__lowercase)
        lowerCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(__lowercase, add_special_tokens=__lowercase)
        self.assertListEqual(__lowercase, __lowercase)

        lowerCAmelCase_ : List[str] = self.get_rust_tokenizer()
        lowerCAmelCase_ : str = tokenizer.encode(__lowercase)
        lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(__lowercase)
        self.assertListEqual(__lowercase, __lowercase)

    # Full tokenization test with accent handling enabled.
    def lowercase_(self) -> None:
        lowerCAmelCase_ : List[str] = AlbertTokenizer(__lowercase, keep_accents=__lowercase)

        lowerCAmelCase_ : Any = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(__lowercase, ['''▁this''', '''▁is''', '''▁a''', '''▁test'''])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase), [4_8, 2_5, 2_1, 1_2_8_9])

        lowerCAmelCase_ : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            __lowercase, ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''']
        )
        lowerCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(__lowercase)
        self.assertListEqual(__lowercase, [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9])

        lowerCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(__lowercase)
        self.assertListEqual(
            __lowercase, ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''],
        )

    # Checks `build_inputs_with_special_tokens` for single and paired sequences.
    def lowercase_(self) -> None:
        lowerCAmelCase_ : Dict = AlbertTokenizer(__lowercase)

        lowerCAmelCase_ : Optional[int] = tokenizer.encode('''sequence builders''')
        lowerCAmelCase_ : List[Any] = tokenizer.encode('''multi-sequence build''')

        lowerCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowercase)
        lowerCAmelCase_ : str = tokenizer.build_inputs_with_special_tokens(__lowercase, __lowercase)

        # [CLS] text [SEP] for a single sequence, [CLS] a [SEP] b [SEP] for a pair.
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]

    # Integration test against the released `albert-base-v2` checkpoint,
    # comparing encodings to a frozen golden fixture.
    @slow
    def lowercase_(self) -> None:
        # fmt: off
        lowerCAmelCase_ : Optional[Any] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase,
            model_name='''albert-base-v2''',
            revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''',
        )
619
from manim import *


class snake_case__(UpperCAmelCase__):
    '''simple docstring'''

    # NOTE(review): the base class `UpperCAmelCase__` is undefined — a manim
    # scene would normally extend `Scene`. All local variables were collapsed
    # to `lowerCAmelCase_` and most call arguments to the undefined
    # `__lowercase`, while later statements read the original names (`mem`,
    # `cpu`, `gpu`, `model`, `fill`, `target`, `key`, `checkpoint`, `disk`,
    # `step_a`, `animations`, ...). As written the method raises NameError;
    # reconstruct against the upstream animation script before use.

    # construct()-style method: animates model weights being loaded from a
    # checkpoint onto CPU, then offloaded to disk and garbage-collected.
    def lowercase_(self) -> None:
        # Base building blocks: memory cell, meta (disk) cell, and fill overlay.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5, width=0.5)
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25, width=0.25)
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU block: two columns of six memory cells plus a label.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6)]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6)]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase).arrange(__lowercase, buff=0)
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase).arrange(__lowercase, buff=0)
        lowerCAmelCase_ : int = VGroup(__lowercase, __lowercase).arrange(__lowercase, buff=0)
        lowerCAmelCase_ : Tuple = Text('''CPU''', font_size=2_4)
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase, __lowercase).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(__lowercase)

        # GPU block: four memory cells plus a label.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4)]
        lowerCAmelCase_ : Any = VGroup(*__lowercase).arrange(__lowercase, buff=0)
        lowerCAmelCase_ : List[Any] = Text('''GPU''', font_size=2_4)
        lowerCAmelCase_ : int = Group(__lowercase, __lowercase).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase)
        gpu.move_to([-1, -1, 0])
        self.add(__lowercase)

        # Model block: six memory cells plus a label.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6)]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase).arrange(__lowercase, buff=0)
        lowerCAmelCase_ : Dict = Text('''Model''', font_size=2_4)
        lowerCAmelCase_ : str = Group(__lowercase, __lowercase).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase)
        model.move_to([3, -1.0, 0])
        self.add(__lowercase)

        # Overlay fill targets that mark model weights as resident on CPU.
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase):
            rect.set_stroke(__lowercase)

            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(__lowercase, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=__lowercase)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=__lowercase, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=__lowercase, buff=0.0)
            self.add(__lowercase)
            model_cpu_arr.append(__lowercase)

        self.add(*__lowercase, *__lowercase, *__lowercase)

        # Loaded-checkpoint block: six memory cells plus a label.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6)]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase).arrange(__lowercase, buff=0)
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''', font_size=2_4)
        lowerCAmelCase_ : int = Group(__lowercase, __lowercase).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase)
        checkpoint.move_to([3, 0.5, 0])
        self.add(__lowercase)

        # Fill overlays for the checkpoint cells, mirrored onto the CPU columns.
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase, opacity=0.7)
            target.move_to(__lowercase)
            ckpt_arr.append(__lowercase)

            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(__lowercase)
        self.add(*__lowercase, *__lowercase)

        # Legend: key square plus colored markers for model/checkpoint cells.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""",
            font_size=1_8,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(__lowercase, __lowercase)

        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""",
            font_size=1_8,
        )
        blue_text.next_to(__lowercase, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(__lowercase)

        # Step 1 caption.
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""",
            font_size=2_4,
        )
        step_a.move_to([2, 2, 0])

        # Disk block: two columns of six meta cells plus a label.
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6)]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6)]
        lowerCAmelCase_ : Any = VGroup(*__lowercase).arrange(__lowercase, buff=0)
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase).arrange(__lowercase, buff=0)
        lowerCAmelCase_ : int = VGroup(__lowercase, __lowercase).arrange(__lowercase, buff=0)
        lowerCAmelCase_ : List[str] = Text('''Disk''', font_size=2_4)
        lowerCAmelCase_ : Optional[int] = Group(__lowercase, __lowercase).arrange(__lowercase, buff=0.5, aligned_edge=__lowercase)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(__lowercase, run_time=3), Write(__lowercase, run_time=1), Create(__lowercase, run_time=1))

        # Animate checkpoint cells shrinking onto the disk column.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(__lowercase, run_time=1.5))
        self.play(*__lowercase)

        self.play(FadeOut(__lowercase))

        # Step 2 caption and cleanup of the checkpoint visuals.
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""", font_size=2_4)
        step_a.move_to([2, 2, 0])
        self.play(Write(__lowercase, run_time=3))

        self.play(
            FadeOut(__lowercase, __lowercase, *__lowercase, *__lowercase),
        )

        self.wait()
619
1
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _UpperCAmelCase : Optional[Any] =logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case__( UpperCAmelCase__, UpperCAmelCase__ ): '''simple docstring''' @register_to_config def __init__( self , __lowercase , __lowercase = None , __lowercase = None ) -> Tuple: super().__init__() lowerCAmelCase_ : str = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" lowerCAmelCase_ : Dict = torch.zeros(__lowercase , __lowercase ) else: lowerCAmelCase_ : str = None lowerCAmelCase_ : Union[str, Any] = torch.nn.Parameter(__lowercase ) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : VQModel SCREAMING_SNAKE_CASE__ : CLIPTextModel SCREAMING_SNAKE_CASE__ : CLIPTokenizer SCREAMING_SNAKE_CASE__ : TransformeraDModel SCREAMING_SNAKE_CASE__ : LearnedClassifierFreeSamplingEmbeddings SCREAMING_SNAKE_CASE__ : VQDiffusionScheduler def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> str: super().__init__() self.register_modules( vqvae=__lowercase , transformer=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> int: lowerCAmelCase_ : Any = len(__lowercase ) if isinstance(__lowercase , __lowercase ) else 1 # get prompt text embeddings lowerCAmelCase_ : Optional[Any] = self.tokenizer( __lowercase , 
padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) lowerCAmelCase_ : Optional[int] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowerCAmelCase_ : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) lowerCAmelCase_ : Dict = text_input_ids[:, : self.tokenizer.model_max_length] lowerCAmelCase_ : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 lowerCAmelCase_ : Any = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__lowercase ) # duplicate text embeddings for each generation per prompt lowerCAmelCase_ : Optional[Any] = prompt_embeds.repeat_interleave(__lowercase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: lowerCAmelCase_ : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings lowerCAmelCase_ : Union[str, Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(__lowercase , 1 , 1 ) else: lowerCAmelCase_ : Tuple = [''''''] * batch_size lowerCAmelCase_ : Optional[Any] = text_input_ids.shape[-1] lowerCAmelCase_ : Any = self.tokenizer( __lowercase , padding='''max_length''' , max_length=__lowercase , truncation=__lowercase , return_tensors='''pt''' , ) lowerCAmelCase_ : Optional[int] = 
self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings lowerCAmelCase_ : str = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__lowercase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowerCAmelCase_ : Any = negative_prompt_embeds.shape[1] lowerCAmelCase_ : List[str] = negative_prompt_embeds.repeat(1 , __lowercase , 1 ) lowerCAmelCase_ : List[str] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __lowercase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCAmelCase_ : Dict = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , __lowercase , __lowercase = 1_0_0 , __lowercase = 5.0 , __lowercase = 1.0 , __lowercase = 1 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , ) -> Union[ImagePipelineOutput, Tuple]: if isinstance(__lowercase , __lowercase ): lowerCAmelCase_ : Optional[Any] = 1 elif isinstance(__lowercase , __lowercase ): lowerCAmelCase_ : List[str] = len(__lowercase ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__lowercase )}""" ) lowerCAmelCase_ : Tuple = batch_size * num_images_per_prompt lowerCAmelCase_ : List[Any] = guidance_scale > 1.0 lowerCAmelCase_ : str = self._encode_prompt(__lowercase , __lowercase , __lowercase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__lowercase , __lowercase ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__lowercase )}.""" ) # get the initial completely masked latents unless the user supplied it lowerCAmelCase_ : List[str] = (batch_size, 
self.transformer.num_latent_pixels) if latents is None: lowerCAmelCase_ : Any = self.transformer.num_vector_embeds - 1 lowerCAmelCase_ : Any = torch.full(__lowercase , __lowercase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,''' f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" ) lowerCAmelCase_ : List[Any] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__lowercase , device=self.device ) lowerCAmelCase_ : Dict = self.scheduler.timesteps.to(self.device ) lowerCAmelCase_ : Dict = latents for i, t in enumerate(self.progress_bar(__lowercase ) ): # expand the sample if we are doing classifier free guidance lowerCAmelCase_ : Tuple = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` lowerCAmelCase_ : int = self.transformer(__lowercase , encoder_hidden_states=__lowercase , timestep=__lowercase ).sample if do_classifier_free_guidance: lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = model_output.chunk(2 ) lowerCAmelCase_ : Optional[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__lowercase , dim=1 , keepdim=__lowercase ) lowerCAmelCase_ : Optional[int] = self.truncate(__lowercase , __lowercase ) # remove `log(0)`'s (`-inf`s) lowerCAmelCase_ : Any = model_output.clamp(-7_0 ) # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase_ : Dict = self.scheduler.step(__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__lowercase , __lowercase , 
__lowercase ) lowerCAmelCase_ : List[Any] = self.vqvae.config.vq_embed_dim lowerCAmelCase_ : Any = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) lowerCAmelCase_ : int = self.vqvae.quantize.get_codebook_entry(__lowercase , shape=__lowercase ) lowerCAmelCase_ : Optional[Any] = self.vqvae.decode(__lowercase , force_not_quantize=__lowercase ).sample lowerCAmelCase_ : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase_ : Union[str, Any] = self.numpy_to_pil(__lowercase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowercase ) def lowercase_ ( self , __lowercase , __lowercase ) -> torch.FloatTensor: lowerCAmelCase_ , lowerCAmelCase_ : int = torch.sort(__lowercase , 1 , descending=__lowercase ) lowerCAmelCase_ : Dict = torch.exp(__lowercase ) lowerCAmelCase_ : List[str] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out lowerCAmelCase_ : Tuple = torch.full_like(keep_mask[:, 0:1, :] , __lowercase ) lowerCAmelCase_ : int = torch.cat((all_true, keep_mask) , dim=1 ) lowerCAmelCase_ : Optional[Any] = keep_mask[:, :-1, :] lowerCAmelCase_ : Dict = keep_mask.gather(1 , indices.argsort(1 ) ) lowerCAmelCase_ : int = log_p_x_0.clone() lowerCAmelCase_ : List[str] = -torch.inf # -inf = log(0) return rv
619
_UpperCAmelCase : Dict =[ (1000, """M"""), (900, """CM"""), (500, """D"""), (400, """CD"""), (100, """C"""), (90, """XC"""), (50, """L"""), (40, """XL"""), (10, """X"""), (9, """IX"""), (5, """V"""), (4, """IV"""), (1, """I"""), ] def lowerCAmelCase ( lowerCAmelCase_ )-> int: lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000} lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : List[str] = 0 while place < len(lowerCAmelCase_ ): if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def lowerCAmelCase ( lowerCAmelCase_ )-> str: lowerCAmelCase_ : List[Any] = [] for arabic, roman in ROMAN: ((lowerCAmelCase_) , (lowerCAmelCase_)) : Optional[int] = divmod(lowerCAmelCase_ , lowerCAmelCase_ ) result.append(roman * factor ) if number == 0: break return "".join(lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
619
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class snake_case__( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=1_8 , __lowercase=3_0 , __lowercase=4_0_0 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=[0.5, 0.5, 0.5] , __lowercase=[0.5, 0.5, 0.5] , ) -> List[Any]: lowerCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 1_8} lowerCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8} lowerCAmelCase_ : Any = parent lowerCAmelCase_ : List[str] = batch_size lowerCAmelCase_ : Dict = num_channels lowerCAmelCase_ : Optional[Any] = image_size lowerCAmelCase_ : Optional[Any] = min_resolution lowerCAmelCase_ : int = max_resolution lowerCAmelCase_ : Tuple = do_resize lowerCAmelCase_ : Dict = size lowerCAmelCase_ : Optional[Any] = do_center_crop lowerCAmelCase_ : Tuple = crop_size lowerCAmelCase_ : int = do_normalize lowerCAmelCase_ : int = image_mean lowerCAmelCase_ : Any = image_std def lowercase_ ( self ) -> List[str]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class snake_case__( UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = LevitImageProcessor if is_vision_available() else None def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[Any] = 
LevitImageProcessingTester(self ) @property def lowercase_ ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self ) -> int: lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowercase , '''image_std''' ) ) self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowercase , '''do_center_crop''' ) ) self.assertTrue(hasattr(__lowercase , '''size''' ) ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8} ) self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} ) lowerCAmelCase_ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} ) self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} ) def lowercase_ ( self ) -> Any: pass def lowercase_ ( self ) -> Tuple: # Initialize image_processing lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image ) # Test not batched input lowerCAmelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase_ : List[str] = image_processing(__lowercase , 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase_ ( self ) -> Tuple: # Initialize image_processing lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray ) # Test not batched input lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase_ : Union[str, Any] = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase_ ( self ) -> int: # Initialize image_processing lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor ) # Test not batched input lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase_ : Optional[Any] = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
619
import csv import tweepy # Twitter API credentials _UpperCAmelCase : int ="""""" _UpperCAmelCase : Optional[int] ="""""" _UpperCAmelCase : Dict ="""""" _UpperCAmelCase : str ="""""" def lowerCAmelCase ( lowerCAmelCase_ )-> None: # authorize twitter, initialize tweepy lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ ) auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ ) # initialize a list to hold all the tweepy Tweets lowerCAmelCase_ : Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # save the id of the oldest tweet less one lowerCAmelCase_ : str = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowerCAmelCase_ ) > 0: print(f"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates lowerCAmelCase_ : Optional[Any] = api.user_timeline( screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # update the id of the oldest tweet less one lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1 print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(lowerCAmelCase_ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
619
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _UpperCAmelCase : Any =logging.get_logger(__name__) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""] def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None: super().__init__(**__lowercase ) lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4} lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : List[Any] = do_resize lowerCAmelCase_ : Optional[int] = size # Default value set here for backwards compatibility where the value in config is None lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 lowerCAmelCase_ : Tuple = resample lowerCAmelCase_ : Optional[int] = do_rescale lowerCAmelCase_ : Any = rescale_factor lowerCAmelCase_ : List[str] = do_normalize lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray: lowerCAmelCase_ : Optional[Any] = 
get_size_dict(__lowercase , default_to_square=__lowercase ) if "shortest_edge" not in size: raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" ) lowerCAmelCase_ : Optional[int] = size['''shortest_edge'''] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct ) lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any: return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray: return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image: lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct lowerCAmelCase_ : str = resample if resample is not None 
else self.resample lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std lowerCAmelCase_ : int = size if size is not None else self.size lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images] if do_resize: lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images] if do_rescale: lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images] if do_normalize: lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images] lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] lowerCAmelCase_ : Dict = {'''pixel_values''': images} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
619
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime, by trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return every prime from 2 up to ``n`` inclusive."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes: zero out every multiple of a surviving value
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    """Return every prime from 2 up to ``n`` inclusive, via primality testing."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list (with repetition)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number in (0, 1):
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # bug fix: integer division; '/' drifted the quotient into float
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return two primes whose sum equals the even ``number`` (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of ``number1`` and ``number2`` (Euclid)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple of ``number1`` and ``number2``."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the primes ``p_number_1`` and ``p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all positive divisors of ``n`` (including 1 and ``n``)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return ``numerator/denominator`` reduced to lowest terms, as a tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci value of this sequence (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
619
1
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the top-k fillings for the single ``<mask>`` token in ``masked_input``.

    Each result is a tuple ``(filled_sentence, probability, predicted_token)``.
    """
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # "\u2581" is sentencepiece's word-boundary marker; map it back to a space.
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
619
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search for ``target`` in ``array[left:right]``; -1 if absent."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted ``array``; returns index or -1.

    Falls back to linear search once the window is narrower than ``precision``.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over ``array[left..right]``; returns index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
619
1
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Return start/end timestamps and rounded duration (minutes) for one CI job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Return ``{job_name: timing_info}`` for every job of a GitHub Actions run.

    ``token`` is an optional GitHub API token; without it the unauthenticated
    rate limit applies. Returns an empty dict on any fetch/parsing error.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # The API pages 100 jobs at a time; fetch the remaining pages (page=2, 3, ...).
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
700
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import table: the configuration is always importable; tokenizers and
# models are registered below only when their optional dependency is present.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a _LazyModule so attribute access triggers the
    # actual submodule import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
0
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU activation element-wise.

    Parameters:
        vector: a list/tuple/array of real values.

    Returns:
        np.ndarray with every negative entry clipped to 0.
    """
    # np.maximum broadcasts the scalar 0 against every element of the input.
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
701
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


# Keep TF32 matmul off so the numeric slice comparisons below are reproducible.
# NOTE(review): restored from the upstream test file; the obfuscated source only
# assigned False to a throwaway name — confirm against upstream.
torch.backends.cuda.matmul.allow_tf32 = False


class snake_case__(unittest.TestCase):
    """Fast CPU tests for VQDiffusionPipeline built from tiny dummy components."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        # Tiny VQ-VAE; seeded so weights (and outputs) are deterministic.
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        # Same seed again so the tuple-return path must reproduce the dict-return path.
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        # Looser tolerance here, as in the obfuscated source (2.0 then 1e-2).
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the released microsoft/vq-diffusion-ithq weights."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
0
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class snake_case__(PretrainedConfig):
    """Configuration for UniSpeechSat models.

    Defaults reproduce a base architecture similar to
    microsoft/unispeech-sat-base-100h-libri-ft. Parameter names were restored
    from the defaults; the obfuscated source repeated one name for every
    parameter, which is not valid Python.
    """

    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy the conv specs so later mutation of the caller's sequence is harmless.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Slow tokenizer unavailable without sentencepiece.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

# Fix: these module constants were all bound to the same throwaway name, leaving
# the identifiers the class references (VOCAB_FILES_NAMES, ...) undefined.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class snake_case__(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) XLNet tokenizer.

    Pads on the left and appends the special tokens (``<sep>``, ``<cls>``) at
    the *end* of a sequence, following the XLNet convention.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        )
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        # XLNet uses segment id 3 for padding.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by appending ``<sep>`` and a trailing ``<cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids; the final ``<cls>`` gets segment id 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into *save_directory*."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
619
0
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# Fix: the base classes were bound to an undefined name and both test classes
# shared one name (the second silently shadowed the first, hiding its tests).
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the SDXL image-to-image pipeline with tiny models."""

    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for a full pipeline."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        # NOTE(review): the mangled source passed an opaque flag here; assuming
        # local_files_only=True (cached tiny tokenizer) — confirm against CI setup.
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs (image in [0, 1], seeded generator)."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        # Overridden as a no-op: optional components are not exercised here.
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against real pretrained weights."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
703
import math import qiskit def lowerCAmelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts: if ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) ): raise TypeError('''inputs must be integers.''' ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError('''inputs must be positive.''' ) if ( (math.floor(lowerCAmelCase_ ) != input_a) or (math.floor(lowerCAmelCase_ ) != input_a) or (math.floor(lowerCAmelCase_ ) != carry_in) ): raise ValueError('''inputs must be exact integers.''' ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError('''inputs must be less or equal to 2.''' ) # build registers lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' ) lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' ) # list the entries lowerCAmelCase_ : Any = [input_a, input_a, carry_in] lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' ) lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 ) return job.result().get_counts(lowerCAmelCase_ ) if __name__ == "__main__": print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
619
0
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class snake_case__(BaseImageProcessor):
    """GLPN-style image processor: resizes images down to the nearest multiple
    of ``size_divisor`` and optionally rescales pixel values to [0, 1]."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        # Fix: these were previously bound to throwaway locals instead of self,
        # so the processor had no configuration at all.
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so that height and width are multiples of *size_divisor*."""
        # Fix: the height/width tuple was assigned to a single throwaway name,
        # leaving `height` and `width` undefined below.
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale*."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured resize/rescale pipeline to one or more images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
704
import re def lowerCAmelCase ( lowerCAmelCase_ )-> bool: lowerCAmelCase_ : Tuple = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' ) if match := re.search(lowerCAmelCase_ , lowerCAmelCase_ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
619
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _UpperCAmelCase : Tuple ={ "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Union[str, Any] =[ "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", "GPTBigCodeModel", "GPTBigCodePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys _UpperCAmelCase : Tuple =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
705
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class snake_case__(BaseImageProcessor):
    """ConvNeXt-style image processor: shortest-edge resize with crop_pct
    cropping below 384px, plus optional rescale and normalization."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Fix: configuration was previously stored in throwaway locals, never
        # on self.
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample=PILImageResampling.BICUBIC,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize; below 384px, resize by 1/crop_pct then center-crop."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ):
        """Multiply pixel values by *scale*."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Apply resize/rescale/normalize to one or more images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Fix: the original condition parsed as `(do_resize and size is None)
        # or resample is None`, raising even when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
619
0
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}


class snake_case__(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) RoBERTa tokenizer, byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Sync the backend pre-tokenizer's add_prefix_space with our setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        """The mask token, or None (with a log message) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word: include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """RoBERTa does not use token type ids; always return zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
706
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# The logger and the archive map previously shared one mangled name
# (`_UpperCAmelCase`), so the dict silently shadowed the logger; give each
# module-level object its own name.
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class snake_case__(PretrainedConfig):
    """Configuration for a GPT-NeoX-Japanese model.

    Holds the architecture hyper-parameters and forwards the special-token
    ids to ``PretrainedConfig`` via ``super().__init__``.

    Note: the base class was previously the undefined name ``UpperCAmelCase__``;
    the imported ``PretrainedConfig`` is the intended parent.
    """

    # Part of the PretrainedConfig contract (auto-class lookup); must carry
    # exactly this string.
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ) -> None:
        # The original signature declared every parameter as `__lowercase`,
        # which is a duplicate-argument SyntaxError.  Distinct names are
        # restored; defaults and positional order are unchanged.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
619
0
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Return the (len(a), len(b)) matrix of pairwise squared Euclidean
    distances between the rows of `a` and the rows of `b`.

    Uses the identity ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2 so the whole
    computation is a single matmul plus broadcasting.
    """
    # Both helpers were previously named `lowerCAmelCase` with duplicate
    # `lowerCAmelCase_` parameters (a SyntaxError) and referenced names that
    # were never bound; real names are restored.
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map every RGB pixel of `x` to the index of its nearest cluster centroid."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class snake_case__(BaseImageProcessor):
    """Image processor for ImageGPT: resize, normalize to [-1, 1], and
    (optionally) color-quantize pixels against a fixed palette of clusters,
    producing flat `input_ids` sequences.

    The base class was previously the undefined name ``UpperCAmelCase_``;
    ``BaseImageProcessor`` (imported above) is the intended parent, and the
    three methods below were all named ``lowercase_`` — shadowing each other
    and breaking the internal ``self.resize`` / ``self.normalize`` calls.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        # Original signature repeated `__lowercase` for every parameter
        # (duplicate-argument SyntaxError); distinct names restored with the
        # same defaults and order.
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to the (height, width) given in `size`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        # `resize` here resolves to the module-level image_transforms helper,
        # not this method (method names are not in scope inside the body).
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Scale pixel values from [0, 255] to [-1, 1] (divide by 127.5, shift by 1)."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess a batch of images into ImageGPT `input_ids`.

        Per-call arguments override the instance defaults set in __init__.
        Returns a BatchFeature with key `input_ids` (return annotation was
        previously the incorrect `PIL.Image.Image`).
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # Convert only when present: the previous unconditional np.array(...)
        # turned None into an object array, defeating the guard below.
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Parenthesized: the original `a and b or c` precedence raised even
        # when do_resize was False but resample was None.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent
            # behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
707
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]: lowerCAmelCase_ : str = parent lowerCAmelCase_ : Optional[Any] = batch_size lowerCAmelCase_ : List[Any] = is_training lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss lowerCAmelCase_ : List[Any] = num_queries lowerCAmelCase_ : str = num_channels lowerCAmelCase_ : Dict = min_size lowerCAmelCase_ : List[str] = max_size lowerCAmelCase_ : Any = num_labels lowerCAmelCase_ : str = mask_feature_size def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowercase ) lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase ) lowerCAmelCase_ : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5 ).float() lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long() 
lowerCAmelCase_ : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> List[str]: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs() lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def lowercase_ ( self , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int: with torch.no_grad(): lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase ) model.to(__lowercase ) model.eval() lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, 
self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowercase , __lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase ) model.to(__lowercase ) model.eval() def comm_check_on_output(__lowercase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Any = model(__lowercase ) comm_check_on_output(__lowercase ) lowerCAmelCase_ : List[Any] = model( pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) comm_check_on_output(__lowercase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Tuple 
= ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = False def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Any = MaskFormerModelTester(self ) lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def lowercase_ ( self ) -> Any: self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def lowercase_ ( self ) -> str: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def lowercase_ ( self ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase_ ( self ) -> Dict: pass def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: lowerCAmelCase_ : Tuple = model_class(__lowercase ) lowerCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : str = [*signature.parameters.keys()] lowerCAmelCase_ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) @slow def lowercase_ ( self ) -> Optional[int]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2 lowerCAmelCase_ : List[Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(), } lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase ) lowerCAmelCase_ : Dict = model(**__lowercase ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> int: lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase ) lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> List[str]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : int = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ 
, lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Optional[Any] = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss loss.backward() def lowercase_ ( self ) -> Optional[int]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : Any = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Any = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowercase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _UpperCAmelCase : Dict =1E-4 def lowerCAmelCase ( )-> Any: lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class snake_case__( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ) -> Union[str, Any]: return ( 
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase ) lowerCAmelCase_ : Dict = self.default_image_processor lowerCAmelCase_ : int = prepare_img() lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**__lowercase ) lowerCAmelCase_ : Union[str, Any] = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : List[Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : int = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : Tuple = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' 
).to(__lowercase ) lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : Dict = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Tuple = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : Dict = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : int = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size 
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : List[str] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : int = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Dict = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : str = self.default_image_processor lowerCAmelCase_ : Union[str, Any] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase ) lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']] lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']] with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) self.assertTrue(outputs.loss is not None )
619
0
from __future__ import annotations import pandas as pd def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> list[int]: lowerCAmelCase_ : Tuple = [0] * no_of_processes lowerCAmelCase_ : Any = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(__A ): lowerCAmelCase_ : List[str] = burst_time[i] lowerCAmelCase_ : Any = 0 lowerCAmelCase_ : Optional[Any] = 0 lowerCAmelCase_ : str = 999_999_999 lowerCAmelCase_ : List[str] = 0 lowerCAmelCase_ : Tuple = False # Process until all processes are completed while complete != no_of_processes: for j in range(__A ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: lowerCAmelCase_ : Optional[Any] = remaining_time[j] lowerCAmelCase_ : Optional[int] = j lowerCAmelCase_ : Union[str, Any] = True if not check: increment_time += 1 continue remaining_time[short] -= 1 lowerCAmelCase_ : Optional[int] = remaining_time[short] if minm == 0: lowerCAmelCase_ : Optional[Any] = 999_999_999 if remaining_time[short] == 0: complete += 1 lowerCAmelCase_ : Optional[int] = False # Find finish time of current process lowerCAmelCase_ : Dict = increment_time + 1 # Calculate waiting time lowerCAmelCase_ : Tuple = finish_time - arrival_time[short] lowerCAmelCase_ : int = finar - burst_time[short] if waiting_time[short] < 0: lowerCAmelCase_ : List[Any] = 0 # Increment time increment_time += 1 return waiting_time def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> list[int]: lowerCAmelCase_ : List[Any] = [0] * no_of_processes for i in range(__A ): lowerCAmelCase_ : Union[str, Any] = burst_time[i] + waiting_time[i] return turn_around_time def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> None: lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : Tuple = 0 for i in range(__A ): lowerCAmelCase_ : Any = total_waiting_time + waiting_time[i] lowerCAmelCase_ : Tuple = total_turn_around_time + turn_around_time[i] 
print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" ) print('''Average turn around time =''' , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print("""Enter how many process you want to analyze""") _UpperCAmelCase : Tuple =int(input()) _UpperCAmelCase : Dict =[0] * no_of_processes _UpperCAmelCase : Any =[0] * no_of_processes _UpperCAmelCase : Any =list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print("""Enter the arrival time and burst time for process:--""" + str(i + 1)) _UpperCAmelCase , _UpperCAmelCase : Optional[int] =map(int, input().split()) _UpperCAmelCase : str =calculate_waitingtime(arrival_time, burst_time, no_of_processes) _UpperCAmelCase : str =burst_time _UpperCAmelCase : Optional[Any] =no_of_processes _UpperCAmelCase : Optional[int] =waiting_time _UpperCAmelCase : str =calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) _UpperCAmelCase : str =pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ """Process""", """BurstTime""", """ArrivalTime""", """WaitingTime""", """TurnAroundTime""", ], ) # Printing the dataFrame pd.set_option("""display.max_rows""", fcfs.shape[0] + 1) print(fcfs)
708
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCAmelCase__ ) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} ) SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} ) SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} ) SCREAMING_SNAKE_CASE__ : str = "audio" SCREAMING_SNAKE_CASE__ : str = "transcription" def lowercase_ ( self , __lowercase ) -> int: if self.audio_column not in features: raise ValueError(f"""Column {self.audio_column} is not present in features.""" ) if not isinstance(features[self.audio_column] , __lowercase ): raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" ) lowerCAmelCase_ : List[str] = copy.deepcopy(self ) lowerCAmelCase_ : Optional[Any] = self.input_schema.copy() lowerCAmelCase_ : Optional[Any] = features[self.audio_column] lowerCAmelCase_ : List[str] = input_schema return task_template @property def lowercase_ ( self ) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class snake_case__( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=1_8 , __lowercase=3_0 , __lowercase=4_0_0 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=[0.5, 0.5, 0.5] , __lowercase=[0.5, 0.5, 0.5] , ) -> List[str]: lowerCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 1_8} lowerCAmelCase_ : Dict = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8} lowerCAmelCase_ : str = parent lowerCAmelCase_ : Tuple = batch_size lowerCAmelCase_ : Optional[int] = num_channels lowerCAmelCase_ : List[Any] = image_size lowerCAmelCase_ : Any = min_resolution lowerCAmelCase_ : str = max_resolution lowerCAmelCase_ : List[str] = do_resize lowerCAmelCase_ : int = size lowerCAmelCase_ : Tuple = do_center_crop lowerCAmelCase_ : Optional[Any] = crop_size lowerCAmelCase_ : Optional[int] = do_normalize lowerCAmelCase_ : int = image_mean lowerCAmelCase_ : Dict = image_std def lowercase_ ( self ) -> int: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class snake_case__( UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = LevitImageProcessor if is_vision_available() else None def lowercase_ ( self ) -> str: lowerCAmelCase_ : int = 
LevitImageProcessingTester(self ) @property def lowercase_ ( self ) -> Any: return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) ) def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8} ) self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} ) lowerCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} ) self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} ) def lowercase_ ( self ) -> int: pass def lowercase_ ( self ) -> Optional[Any]: # Initialize image_processing lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , Image.Image ) # Test not batched input lowerCAmelCase_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase_ : Tuple = 
image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase_ ( self ) -> List[str]: # Initialize image_processing lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , np.ndarray ) # Test not batched input lowerCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase_ : Tuple = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowercase_ ( self ) -> List[str]: # Initialize image_processing lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , torch.Tensor ) # Test not batched input lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase_ : Any = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
709
_UpperCAmelCase : int =frozenset( [ """prompt""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", """cross_attention_kwargs""", ] ) _UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""]) _UpperCAmelCase : Dict =frozenset([]) _UpperCAmelCase : int =frozenset(["""image"""]) _UpperCAmelCase : Tuple =frozenset( [ """image""", """height""", """width""", """guidance_scale""", ] ) _UpperCAmelCase : int =frozenset(["""image"""]) _UpperCAmelCase : str =frozenset( [ """prompt""", """image""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", ] ) _UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""]) _UpperCAmelCase : Optional[int] =frozenset( [ # Text guided image variation with an image mask """prompt""", """image""", """mask_image""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", ] ) _UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""]) _UpperCAmelCase : Optional[Any] =frozenset( [ # image variation with an image mask """image""", """mask_image""", """height""", """width""", """guidance_scale""", ] ) _UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""]) _UpperCAmelCase : Union[str, Any] =frozenset( [ """example_image""", """image""", """mask_image""", """height""", """width""", """guidance_scale""", ] ) _UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""]) _UpperCAmelCase : Any =frozenset(["""class_labels"""]) _UpperCAmelCase : List[Any] =frozenset(["""class_labels"""]) _UpperCAmelCase : int =frozenset(["""batch_size"""]) _UpperCAmelCase : str =frozenset([]) _UpperCAmelCase : str =frozenset(["""batch_size"""]) _UpperCAmelCase : Optional[Any] =frozenset([]) _UpperCAmelCase : Tuple 
=frozenset( [ """prompt""", """audio_length_in_s""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", """cross_attention_kwargs""", ] ) _UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""]) _UpperCAmelCase : List[str] =frozenset(["""input_tokens"""]) _UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
619
0
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _UpperCAmelCase : List[Any] =logging.getLogger(__name__) @dataclass class snake_case__: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=UpperCAmelCase__, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default="""NER""", metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=UpperCAmelCase__, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) SCREAMING_SNAKE_CASE__ : bool = field(default=UpperCAmelCase__, metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=UpperCAmelCase__, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, ) @dataclass class snake_case__: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = field( metadata={"""help""": """The input data dir. 
Should contain the .txt files for a CoNLL-2003-formatted task."""} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=UpperCAmelCase__, metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""}, ) SCREAMING_SNAKE_CASE__ : int = field( default=128, metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) }, ) SCREAMING_SNAKE_CASE__ : bool = field( default=UpperCAmelCase__, metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def lowerCAmelCase ( )-> str: lowerCAmelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCAmelCase_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ''' --overwrite_output_dir to overcome.''' ) lowerCAmelCase_ : List[str] = import_module('''tasks''' ) try: lowerCAmelCase_ : Optional[int] = getattr(lowerCamelCase__ , model_args.task_type ) lowerCAmelCase_ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , lowerCamelCase__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task lowerCAmelCase_ : Any = token_classification_task.get_labels(data_args.labels ) lowerCAmelCase_ : Dict[int, str] = dict(enumerate(lowerCamelCase__ ) ) lowerCAmelCase_ : Any = len(lowerCamelCase__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowerCAmelCase_ : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid={label: i for i, label in enumerate(lowerCamelCase__ )} , cache_dir=model_args.cache_dir , ) lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) lowerCAmelCase_ : Union[str, Any] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , ) # Get datasets lowerCAmelCase_ : Tuple = ( TokenClassificationDataset( token_classification_task=lowerCamelCase__ , data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , labels=lowerCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowerCAmelCase_ : int = ( TokenClassificationDataset( token_classification_task=lowerCamelCase__ , data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , labels=lowerCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple[List[int], List[int]]: lowerCAmelCase_ : Tuple = np.argmax(lowerCamelCase__ , axis=2 ) lowerCAmelCase_ : str = preds.shape lowerCAmelCase_ : List[Any] = [[] for _ in range(lowerCamelCase__ )] lowerCAmelCase_ : Union[str, Any] = [[] for _ in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ ): for j in range(lowerCamelCase__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: 
out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(lowerCAmelCase_ ) -> Dict: lowerCAmelCase_ : Any = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(lowerCamelCase__ , lowerCamelCase__ ), "precision": precision_score(lowerCamelCase__ , lowerCamelCase__ ), "recall": recall_score(lowerCamelCase__ , lowerCamelCase__ ), "f1": fa_score(lowerCamelCase__ , lowerCamelCase__ ), } # Data collator lowerCAmelCase_ : Dict = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowerCAmelCase_ : List[str] = Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , data_collator=lowerCamelCase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowerCAmelCase_ : List[str] = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) lowerCAmelCase_ : Tuple = trainer.evaluate() lowerCAmelCase_ : List[str] = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_process_zero(): with open(lowerCamelCase__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , lowerCamelCase__ , lowerCamelCase__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(lowerCamelCase__ ) # Predict if training_args.do_predict: lowerCAmelCase_ : List[Any] = TokenClassificationDataset( 
token_classification_task=lowerCamelCase__ , data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , labels=lowerCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) lowerCAmelCase_ : List[Any] = trainer.predict(lowerCamelCase__ ) lowerCAmelCase_ : int = align_predictions(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase_ : str = os.path.join(training_args.output_dir , '''test_results.txt''' ) if trainer.is_world_process_zero(): with open(lowerCamelCase__ , '''w''' ) as writer: for key, value in metrics.items(): logger.info(''' %s = %s''' , lowerCamelCase__ , lowerCamelCase__ ) writer.write('''%s = %s\n''' % (key, value) ) # Save predictions lowerCAmelCase_ : Any = os.path.join(training_args.output_dir , '''test_predictions.txt''' ) if trainer.is_world_process_zero(): with open(lowerCamelCase__ , '''w''' ) as writer: with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f: token_classification_task.write_predictions_to_file(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return results def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[int]: main() if __name__ == "__main__": main()
710
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int: lowerCAmelCase_ : Dict = 1 lowerCAmelCase_ : List[Any] = 1 lowerCAmelCase_ : Optional[Any] = {1: 1} for inputa in range(2 , lowerCAmelCase_ ): lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : Dict = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowerCAmelCase_ : Any = (3 * number) + 1 counter += 1 if inputa not in counters: lowerCAmelCase_ : Tuple = counter if counter > pre_counter: lowerCAmelCase_ : Optional[int] = inputa lowerCAmelCase_ : Union[str, Any] = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
619
0
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : Optional[int] =logging.get_logger(__name__) _UpperCAmelCase : int ={ 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = """owlvit_text_model""" def __init__( self , __lowercase=4_9_4_0_8 , __lowercase=5_1_2 , __lowercase=2_0_4_8 , __lowercase=1_2 , __lowercase=8 , __lowercase=1_6 , __lowercase="quick_gelu" , __lowercase=1e-5 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , __lowercase=0 , __lowercase=4_9_4_0_6 , __lowercase=4_9_4_0_7 , **__lowercase , ) -> Optional[int]: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ : Any = vocab_size lowerCAmelCase_ : List[Any] = hidden_size lowerCAmelCase_ : Dict = intermediate_size lowerCAmelCase_ : Optional[Any] = num_hidden_layers lowerCAmelCase_ : Union[str, Any] = num_attention_heads lowerCAmelCase_ : List[Any] = max_position_embeddings lowerCAmelCase_ : Any = hidden_act lowerCAmelCase_ : int = layer_norm_eps lowerCAmelCase_ : Optional[Any] = attention_dropout lowerCAmelCase_ : Optional[int] = initializer_range lowerCAmelCase_ : Any = initializer_factor @classmethod def lowercase_ ( cls , __lowercase , **__lowercase ) -> Any: cls._set_token_in_kwargs(__A ) lowerCAmelCase_ : Optional[Any] = cls.get_config_dict(__A , **__A ) # get the text config dict if 
we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": lowerCAmelCase_ : int = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__A , **__A ) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = """owlvit_vision_model""" def __init__( self , __lowercase=7_6_8 , __lowercase=3_0_7_2 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3 , __lowercase=7_6_8 , __lowercase=3_2 , __lowercase="quick_gelu" , __lowercase=1e-5 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , **__lowercase , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ : Tuple = hidden_size lowerCAmelCase_ : Optional[int] = intermediate_size lowerCAmelCase_ : Optional[int] = num_hidden_layers lowerCAmelCase_ : Union[str, Any] = num_attention_heads lowerCAmelCase_ : Any = num_channels lowerCAmelCase_ : Union[str, Any] = image_size lowerCAmelCase_ : Optional[Any] = patch_size lowerCAmelCase_ : List[Any] = hidden_act lowerCAmelCase_ : Dict = layer_norm_eps lowerCAmelCase_ : Any = attention_dropout lowerCAmelCase_ : int = initializer_range lowerCAmelCase_ : Optional[Any] = initializer_factor @classmethod def lowercase_ ( cls , __lowercase , **__lowercase ) -> Tuple: cls._set_token_in_kwargs(__A ) lowerCAmelCase_ : int = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": lowerCAmelCase_ : Dict = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type 
{config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__A , **__A ) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = """owlvit""" SCREAMING_SNAKE_CASE__ : Any = True def __init__( self , __lowercase=None , __lowercase=None , __lowercase=5_1_2 , __lowercase=2.65_92 , __lowercase=True , **__lowercase , ) -> str: super().__init__(**__A ) if text_config is None: lowerCAmelCase_ : Any = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' ) if vision_config is None: lowerCAmelCase_ : List[Any] = {} logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' ) lowerCAmelCase_ : Any = OwlViTTextConfig(**__A ) lowerCAmelCase_ : str = OwlViTVisionConfig(**__A ) lowerCAmelCase_ : Tuple = projection_dim lowerCAmelCase_ : Union[str, Any] = logit_scale_init_value lowerCAmelCase_ : int = return_dict lowerCAmelCase_ : Optional[int] = 1.0 @classmethod def lowercase_ ( cls , __lowercase , **__lowercase ) -> Union[str, Any]: cls._set_token_in_kwargs(__A ) lowerCAmelCase_ : Optional[Any] = cls.get_config_dict(__A , **__A ) if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__A , **__A ) @classmethod def lowercase_ ( cls , __lowercase , __lowercase , **__lowercase ) -> str: lowerCAmelCase_ : List[Any] = {} lowerCAmelCase_ : List[str] = text_config lowerCAmelCase_ : str = vision_config return cls.from_dict(__A , **__A ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ : Optional[int] = self.text_config.to_dict() lowerCAmelCase_ : Optional[int] = self.vision_config.to_dict() lowerCAmelCase_ : Dict = self.__class__.model_type return output class snake_case__( UpperCAmelCase__ ): '''simple docstring''' @property def lowercase_ ( self ) -> Union[str, Any]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ] ) @property def lowercase_ ( self ) -> List[Any]: return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ] ) @property def lowercase_ ( self ) -> Any: return 1e-4 def lowercase_ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = None , ) -> Dict: lowerCAmelCase_ : int = super().generate_dummy_inputs( processor.tokenizer , batch_size=__A , seq_length=__A , framework=__A ) lowerCAmelCase_ : List[Any] = super().generate_dummy_inputs( processor.image_processor , batch_size=__A , framework=__A ) return {**text_input_dict, **image_input_dict} @property def lowercase_ ( self ) -> Dict: return 1_4
711
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : str =logging.get_logger(__name__) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder-decoder""" SCREAMING_SNAKE_CASE__ : str = True def __init__( self , **__lowercase ) -> Union[str, Any]: super().__init__(**__lowercase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" lowerCAmelCase_ : str = kwargs.pop('''encoder''' ) lowerCAmelCase_ : int = encoder_config.pop('''model_type''' ) lowerCAmelCase_ : Optional[Any] = kwargs.pop('''decoder''' ) lowerCAmelCase_ : Optional[Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowerCAmelCase_ : Union[str, Any] = AutoConfig.for_model(__lowercase , **__lowercase ) lowerCAmelCase_ : List[str] = AutoConfig.for_model(__lowercase , **__lowercase ) lowerCAmelCase_ : Any = True @classmethod def lowercase_ ( cls , __lowercase , __lowercase , **__lowercase ) -> PretrainedConfig: logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowerCAmelCase_ : int = True lowerCAmelCase_ : List[Any] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowercase ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ : List[str] = self.encoder.to_dict() lowerCAmelCase_ : Dict = self.decoder.to_dict() lowerCAmelCase_ : Optional[Any] = self.__class__.model_type return output
619
0
'''simple docstring''' def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> str: return "\n".join( f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
712
from __future__ import annotations from math import pi def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]: if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if inductance < 0: raise ValueError('''Inductance cannot be negative''' ) if frequency < 0: raise ValueError('''Frequency cannot be negative''' ) if reactance < 0: raise ValueError('''Inductive reactance cannot be negative''' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
619
0
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite for GPT-SW3 (SentencePiece model with byte fallback)."""

    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        # Tokenize/detokenize round-trip sample used by the common test-suite.
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
713
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names of the learning-rate schedules accepted by `get_scheduler`."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Schedule with a constant LR (multiplier 1 at every step)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant LR preceded by a linear warmup from 0 over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant LR multipliers parsed from a rule string.

    `step_rules` looks like `"1:10,0.1:20,0.01:30,0.005"`: multiplier 1 until
    step 10, 0.1 until step 20, 0.01 until step 30, then 0.005 afterwards.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    # The trailing, colon-less entry is the multiplier used after the last boundary.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        # Closure factory so the rule table is bound at creation time.
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup from 0 to the initial LR, then linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup, then cosine decay to 0 following `num_cycles` waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the initial LR down to `lr_end`.

    Raises:
        ValueError: if `lr_end` is not smaller than the optimizer's initial LR.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        # Fixed doubled word ("must be be") in the original message.
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build any supported schedule from its name.

    Raises:
        ValueError: when a schedule needs `num_warmup_steps` or
            `num_training_steps` and it was not provided.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
619
0
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (parlai substring, HF substring) replacement pairs applied in order.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Map a ParlAI parameter name to its HF Blenderbot equivalent."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    """Rename the four embedding layer-norm keys in `sd` in place."""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint to HF format and save it.

    Args:
        checkpoint_path: path to the ParlAI `*.bin` checkpoint.
        pytorch_dump_folder_path: output directory for the HF model.
        config_json_path: path to the `BlenderbotConfig` JSON to use.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
714
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Solve the mass-action law n * p = n_i**2 for the argument given as 0.

    Exactly one of the three concentrations must be 0; it is computed from the
    other two and returned as ``(quantity_name, value)``.

    Raises:
        ValueError: if not exactly one argument is 0, or if any is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor"
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable given the count(0) check above; kept for safety.
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive solution to the rod-cutting problem.

    Args:
        n: rod length.
        prices: ``prices[i - 1]`` is the price of a piece of length ``i``.

    Returns:
        Maximum obtainable revenue (0 for a rod of length 0).
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) rod cutting, O(n^2)."""
    _enforce_args(n, prices)
    # max_rev[i] < 0 marks "not computed yet".
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Recursive helper for `top_down_cut_rod`; fills the `max_rev` memo table."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Bottom-up dynamic-programming rod cutting, O(n^2)."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Validate inputs shared by all three implementations.

    Raises:
        ValueError: if ``n`` is negative or exceeds ``len(prices)``.
    """
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
715
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Configs whose docstrings legitimately have no checkpoint link.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first valid checkpoint name found in `config_class`'s docstring, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing all config classes whose docstring lacks a valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
619
0
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    """Deprecated alias of `FlavaImageProcessor`; emits a FutureWarning on instantiation."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


# Backward-compatible alias for the previous (obfuscated) class name.
snake_case__ = FlavaFeatureExtractor
716
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """Wraps a LR scheduler so it only advances when its optimizer(s) really stepped.

    Args:
        scheduler: the wrapped `torch.optim.lr_scheduler` instance.
        optimizers: one optimizer or a list/tuple of optimizers tied to the scheduler.
        step_with_optimizer: if True, only step the scheduler when the optimizers did.
        split_batches: if True, one scheduler step per training step; otherwise
            `num_processes` steps per training step.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough accessors delegating to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)


# Backward-compatible alias for the previous (obfuscated) class name.
snake_case__ = AcceleratedScheduler
619
0
def solution(limit: int = 1_000_000) -> int:
    """Return the sum of Euler's totient phi(n) for 2 <= n <= limit.

    Uses a sieve: start every phi[i] at i - 1; whenever phi[i] is still i - 1
    at position i, i is prime, so subtract the 1/i fraction from every
    multiple of i.  The result equals the number of reduced proper fractions
    with denominator <= limit (Project Euler style).
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: its phi entry was never touched
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
717
from manim import *


# NOTE(review): this is an obfuscated manim animation (it appears to visualize
# how model weights, a loaded checkpoint, CPU/GPU memory and disk interact,
# presumably an accelerate big-model-inference illustration — TODO confirm).
# The obfuscation rebinds every assignment to the same name `lowerCAmelCase_`
# while later statements reference the intended names (`mem`, `cpu`, `gpu`,
# `model`, `cpu_left_col_base`, `model_cpu_arr`, `ckpt_arr`, `key_text`,
# `disk_left_col_base`, `animations`, ...), and positional arguments such as
# arrange directions and fill colors were replaced by the undefined name
# `__lowercase`.  The Scene therefore cannot run as-is; the code is kept
# byte-identical below and only annotated.
class snake_case__( UpperCAmelCase__ ):
    '''simple docstring'''

    def lowercase_ ( self ) -> Tuple:
        # Building blocks: a memory cell, a smaller "meta" cell, and a fill square.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of 6 cells plus a label, placed at the left.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # GPU: a single column of 4 cells plus a label.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # Model: 6 cells plus a label at the right.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        # Small filled targets representing model weights placed over CPU cells.
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # Loaded checkpoint: 6 cells plus a label above the model.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        # Checkpoint fill targets mirrored onto the two CPU columns.
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Legend explaining the colors of the filled squares.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Caption for the first animation step.
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        # Disk: two columns of 6 "meta" cells plus a label.
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        # Animate the checkpoint squares shrinking onto the disk cells.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        # Caption for the second animation step, then fade everything out.
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
619
0
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values to printable unicode characters.

    Byte-level BPE vocabularies work on unicode strings, so every possible byte
    must map to a distinct, printable character (whitespace/control bytes are
    remapped above 2**8 to avoid them appearing in merge files).
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    """LED tokenizer: byte-level BPE (same algorithm as BART/GPT-2).

    Special-token layout follows BART: `<s> A </s>` for a single sequence and
    `<s> A </s></s> B </s>` for a pair.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # first line is a "#version" header, last is an empty trailing line
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair-encoding merges to a pre-tokenized `token` string.

        Returns the space-joined sequence of merged sub-tokens; results are
        memoized in `self.cache`.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Lowest-ranked (earliest-learned) pair is merged first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write `vocab.json` and `merges.txt` into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add BART-style special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return 1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED (like BART) does not use token type ids; always return zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then pad `global_attention_mask` (LED-specific) to match."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
718
# Ordered value/symbol pairs used greedily by int_to_roman; subtractive forms
# (CM, CD, XC, ...) are included so no special-casing is needed.
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a roman numeral string (e.g. "XIX") to its integer value.

    A symbol smaller than its successor is subtractive (IV -> 4), otherwise
    values are simply accumulated left to right.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # Subtractive pair, e.g. "IX" -> 9; consume both symbols.
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert a positive integer to its roman numeral representation."""
    result = []
    for arabic, roman in ROMAN:
        # How many times the current symbol fits; remainder carries on.
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
# Conditional exports for the VQ-Diffusion pipeline.
from ...utils import is_torch_available, is_transformers_available


# The pipeline depends on both PyTorch and `transformers`; only expose the
# public classes when both optional dependencies are installed, so importing
# this package never fails in a minimal environment.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
719
import csv

import tweepy

# Twitter API credentials (fill in before running)
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent timeline and dump it to `new_<name>_tweets.csv`.

    The Twitter API caps `user_timeline` at 200 tweets per request, so the
    timeline is walked backwards with `max_id` until no tweets are returned.
    Output columns: id, created_at, text.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one, so pagination excludes it
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
619
0
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Lightweight stand-in for a module: copies the module's public attributes
    onto itself so individual attributes can then be overridden without
    touching the real module."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                # Copy requested attrs plus everything that isn't a dunder.
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        # Keep a handle on the real module (unwrap if already patched) so
        # nested patches can recognize each other.
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Context manager that patches a submodule or attribute of `obj`'s module.

    Example: `patch_submodule(my_module, "os.path.join", fake_join)` replaces
    `os.path.join` as seen from inside `my_module`, restoring it on exit.
    """

    # Patches activated via `start()`, so `stop()`/test teardown can find them.
    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}  # attr name -> original value, for restoration
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        # Restore everything recorded in __enter__, emptying the dict so the
        # patcher can be re-entered.
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch (non-context-manager use)."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch started with `start()`."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
720
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff `number` is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int):
    """Sieve of Eratosthenes: return all primes from 2 to n (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int):
    """Return all primes from 2 to n (inclusive) by testing each candidate."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int):
    """Return the prime factorization of `number` as a list (with repetition)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of `number`."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of `number`."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True iff `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int):
    """Goldbach: return the first pair of primes summing to the even `number`."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 0) and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple, built from the prime factorizations."""
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 1) and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int):
    """Return the primes strictly between two given primes (exclusive)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list) and ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int):
    """Return all positive divisors of n (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """Return True iff `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int):
    """Return (numerator, denominator) reduced to lowest terms."""
    assert (
        isinstance(numerator, int) and isinstance(denominator, int) and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
619
0
class Node:
    """A named value stored in the heap; nodes compare by ``val``."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        # '>' on two Nodes also works via this reflected comparison.
        return self.val < other.val


class MinHeap:
    """
    Array-backed min-heap of Node objects.

    Keeps two auxiliary maps so that decrease_key runs in O(log n):
    - idx_of_element: Node -> current index in self.heap
    - heap_dict:      node name -> current value (for __getitem__)
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        # Lookup by node *name*, not by index.
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify 'array' in place (bottom-up) and return it."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val
        # Sift down every internal node, last parent first.
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                # Swap the nodes and keep the index map in sync.
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move self.heap[idx] up until the min-heap property holds."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum node."""
        return self.heap[0]

    def remove(self):
        """Remove and return the minimum node."""
        # Swap root with the last element, pop it, then restore the heap.
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Add a new node and restore the heap property."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower 'node' to 'new_value' (must be strictly smaller) and re-heapify."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


# Demo nodes (names referenced by the code below).
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
721
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Scan array[left:right] and return the index of 'target', or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """
    Iterative ternary search over a sorted 'array'.

    Falls back to a linear scan once the window is narrower than
    'precision'. Returns the index of 'target' or -1.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """
    Recursive ternary search over a sorted 'array' in [left, right].

    Same contract as ite_ternary_search; returns index or -1.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
619
0
import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args):
    """
    Convert a GPTSAN TensorFlow checkpoint into a PyTorch state dict.

    Reads 'parameters.json' and the TF checkpoint from args.tf_model_dir,
    maps every Mesh-Tensorflow variable name onto the corresponding
    transformers parameter name, and saves the result to args.output
    (a '.pt' suffix is appended if missing).
    """
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            # Optimizer slots are not model weights.
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    # Token embeddings are tied to the LM head.
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
700
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public names it exposes.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
0
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of FlaxControlNetModel: per-resolution residuals plus the mid-block residual."""

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    """Small conv network that embeds the conditioning image into the UNet's input space."""

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            # Strided conv halves the spatial resolution between stages.
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        # Zero-initialized so the ControlNet starts as a no-op.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Flax ControlNet: a UNet-encoder copy that produces residuals conditioned on an extra image."""

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.Array) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        # The conditioning image is in pixel space, 8x the latent resolution.
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards breaking which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        # Every ControlNet projection conv is zero-initialized (no-op at start).
        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        """
        Run the ControlNet and return the scaled down-block and mid-block residuals.

        When return_dict is False a (down_block_res_samples, mid_block_res_sample)
        tuple is returned instead of a FlaxControlNetOutput.
        """
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (NCHW -> NHWC for flax convs)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
701
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu _UpperCAmelCase : Any =False class snake_case__( unittest.TestCase ): '''simple docstring''' def lowercase_ ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase_ ( self ) -> Union[str, Any]: return 1_2 @property def lowercase_ ( self ) -> Any: return 1_2 @property def lowercase_ ( self ) -> Optional[Any]: return 3_2 @property def lowercase_ ( self ) -> int: torch.manual_seed(0 ) lowerCAmelCase_ : Any = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def lowercase_ ( self ) -> int: torch.manual_seed(0 ) lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(__lowercase ) @property def lowercase_ ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ : Union[str, Any] = 1_2 lowerCAmelCase_ : int = 1_2 lowerCAmelCase_ : Union[str, Any] = { '''attention_bias''': True, 
'''cross_attention_dim''': 3_2, '''attention_head_dim''': height * width, '''num_attention_heads''': 1, '''num_vector_embeds''': self.num_embed, '''num_embeds_ada_norm''': self.num_embeds_ada_norm, '''norm_num_groups''': 3_2, '''sample_size''': width, '''activation_fn''': '''geglu-approximate''', } lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase ) return model def lowercase_ ( self ) -> str: lowerCAmelCase_ : List[Any] = '''cpu''' lowerCAmelCase_ : Any = self.dummy_vqvae lowerCAmelCase_ : str = self.dummy_text_encoder lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer lowerCAmelCase_ : int = self.dummy_transformer lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed ) lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase ) lowerCAmelCase_ : Dict = VQDiffusionPipeline( vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , ) lowerCAmelCase_ : int = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) lowerCAmelCase_ : Any = '''teddy bear playing in the pool''' lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' ) lowerCAmelCase_ : Union[str, Any] = output.images lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : List[Any] = pipe( [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0] lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1] lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] ) assert np.abs(image_slice.flatten() - expected_slice 
).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : Optional[Any] = '''cpu''' lowerCAmelCase_ : str = self.dummy_vqvae lowerCAmelCase_ : Dict = self.dummy_text_encoder lowerCAmelCase_ : List[Any] = self.dummy_tokenizer lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed ) lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings( learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) lowerCAmelCase_ : List[str] = VQDiffusionPipeline( vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , ) lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) lowerCAmelCase_ : Any = '''teddy bear playing in the pool''' lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' ) lowerCAmelCase_ : str = output.images lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : Union[str, Any] = pipe( [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0] lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1] lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class snake_case__( unittest.TestCase ): '''simple docstring''' def 
lowercase_ ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> int: lowerCAmelCase_ : Tuple = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' ) lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' ) lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 ) lowerCAmelCase_ : Optional[int] = pipeline( '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , ) lowerCAmelCase_ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - image ).max() < 2.0
619
0
from __future__ import annotations from decimal import Decimal from numpy import array def lowerCAmelCase ( lowerCAmelCase_ ): lowerCAmelCase_ : Optional[int] = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(lowerCAmelCase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase_ : Union[str, Any] = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase_ : Dict = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase_ , lowerCAmelCase_ : Dict = matrix[1][1], matrix[0][0] lowerCAmelCase_ , lowerCAmelCase_ : Any = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(lowerCAmelCase_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(lowerCAmelCase_ ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase_ : Tuple = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creating cofactor matrix lowerCAmelCase_ : Tuple = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase_ : Optional[int] = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase_ : Any = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] 
)) ) lowerCAmelCase_ : Optional[int] = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase_ : Tuple = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase_ : List[Any] = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase_ : List[str] = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase_ : Optional[Any] = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase_ : Optional[Any] = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase_ : str = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase_ : int = array(lowerCAmelCase_ ) for i in range(3 ): for j in range(3 ): lowerCAmelCase_ : str = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase_ : List[Any] = array(lowerCAmelCase_ ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(lowerCAmelCase_ ) # Calculate the inverse of the matrix return [[float(d(lowerCAmelCase_ ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
702
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: _UpperCAmelCase : Dict =None _UpperCAmelCase : Tuple =logging.get_logger(__name__) _UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} _UpperCAmelCase : Any ={ """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", }, """tokenizer_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""", }, } _UpperCAmelCase : Dict ={ """xlnet-base-cased""": None, """xlnet-large-cased""": None, } _UpperCAmelCase : Tuple ="""▁""" # Segments (not really needed) _UpperCAmelCase : str =0 _UpperCAmelCase : List[str] =1 _UpperCAmelCase : int =2 _UpperCAmelCase : Any =3 _UpperCAmelCase : List[Any] =4 class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Any = """left""" SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]: # Mask token behave like a normal 
word, i.e. include the space before it lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token super().__init__( vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , ) lowerCAmelCase_ : List[Any] = 3 lowerCAmelCase_ : Dict = do_lower_case lowerCAmelCase_ : Dict = remove_space lowerCAmelCase_ : List[str] = keep_accents lowerCAmelCase_ : List[str] = vocab_file lowerCAmelCase_ : str = False if not self.vocab_file else True def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]: lowerCAmelCase_ : Tuple = [self.sep_token_id] lowerCAmelCase_ : Any = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]: lowerCAmelCase_ : Optional[Any] = [self.sep_token_id] lowerCAmelCase_ : List[Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__lowercase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase_ : str = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != 
os.path.abspath(__lowercase ): copyfile(self.vocab_file , __lowercase ) return (out_vocab_file,)
619
0
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position _UpperCAmelCase : Union[str, Any] ="""2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from 
datasets.utils import download_manager as _deprecated_download_manager # isort:skip _UpperCAmelCase : List[Any] =concatenate_datasets _UpperCAmelCase : List[str] =DownloadConfig _UpperCAmelCase : Union[str, Any] =DownloadManager _UpperCAmelCase : str =DownloadMode _UpperCAmelCase : Union[str, Any] =DownloadConfig _UpperCAmelCase : List[str] =DownloadMode _UpperCAmelCase : Dict =DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
703
import math import qiskit def lowerCAmelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts: if ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) ): raise TypeError('''inputs must be integers.''' ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError('''inputs must be positive.''' ) if ( (math.floor(lowerCAmelCase_ ) != input_a) or (math.floor(lowerCAmelCase_ ) != input_a) or (math.floor(lowerCAmelCase_ ) != carry_in) ): raise ValueError('''inputs must be exact integers.''' ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError('''inputs must be less or equal to 2.''' ) # build registers lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' ) lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' ) # list the entries lowerCAmelCase_ : Any = [input_a, input_a, carry_in] lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' ) lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 ) return job.result().get_counts(lowerCAmelCase_ ) if __name__ == "__main__": print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
619
0
import numpy as np import qiskit def lowerCAmelCase ( lowerCAmelCase_ = 8 , lowerCAmelCase_ = None )-> List[Any]: lowerCAmelCase_ : Optional[Any] = np.random.default_rng(seed=_lowerCamelCase ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. lowerCAmelCase_ : Optional[Any] = 6 * key_len # Measurement basis for Alice's qubits. lowerCAmelCase_ : List[str] = rng.integers(2 , size=_lowerCamelCase ) # The set of states Alice will prepare. lowerCAmelCase_ : str = rng.integers(2 , size=_lowerCamelCase ) # Measurement basis for Bob's qubits. lowerCAmelCase_ : List[str] = rng.integers(2 , size=_lowerCamelCase ) # Quantum Circuit to simulate BB84 lowerCAmelCase_ : Tuple = qiskit.QuantumCircuit(_lowerCamelCase , name='''BB84''' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(_lowerCamelCase ): if alice_state[index] == 1: bbaa_circ.x(_lowerCamelCase ) if alice_basis[index] == 1: bbaa_circ.h(_lowerCamelCase ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(_lowerCamelCase ): if bob_basis[index] == 1: bbaa_circ.h(_lowerCamelCase ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. lowerCAmelCase_ : Optional[Any] = qiskit.Aer.get_backend('''aer_simulator''' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. lowerCAmelCase_ : Tuple = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1 , seed_simulator=_lowerCamelCase ) # Returns the result of measurement. lowerCAmelCase_ : Optional[int] = job.result().get_counts(_lowerCamelCase ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. 
lowerCAmelCase_ : Any = "".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. lowerCAmelCase_ : Optional[int] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase , '''0''' ) return key if __name__ == "__main__": print(f"""The generated key is : {bbaa(8, seed=0)}""") from doctest import testmod testmod()
704
import re def lowerCAmelCase ( lowerCAmelCase_ )-> bool: lowerCAmelCase_ : Tuple = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' ) if match := re.search(lowerCAmelCase_ , lowerCAmelCase_ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
619
0
import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( """The `inpainting.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionInpaintPipeline` instead.""" )
705
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _UpperCAmelCase : Any =logging.get_logger(__name__) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""] def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None: super().__init__(**__lowercase ) lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4} lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : List[Any] = do_resize lowerCAmelCase_ : Optional[int] = size # Default value set here for backwards compatibility where the value in config is None lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 lowerCAmelCase_ : Tuple = resample lowerCAmelCase_ : Optional[int] = do_rescale lowerCAmelCase_ : Any = rescale_factor lowerCAmelCase_ : List[str] = do_normalize lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray: lowerCAmelCase_ : Optional[Any] = 
get_size_dict(__lowercase , default_to_square=__lowercase ) if "shortest_edge" not in size: raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" ) lowerCAmelCase_ : Optional[int] = size['''shortest_edge'''] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct ) lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any: return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray: return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image: lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct lowerCAmelCase_ : str = resample if resample is not None 
else self.resample lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std lowerCAmelCase_ : int = size if size is not None else self.size lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images] if do_resize: lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images] if do_rescale: lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images] if do_normalize: lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images] lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] lowerCAmelCase_ : Dict = {'''pixel_values''': images} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
619
0
from datetime import datetime as dt import os from github import Github _UpperCAmelCase =[ """good first issue""", """good second issue""", """good difficult issue""", """feature request""", """new model""", """wip""", ] def lowerCAmelCase ( )-> Optional[int]: lowerCAmelCase_ : List[Any] = Github(os.environ['''GITHUB_TOKEN'''] ) lowerCAmelCase_ : List[str] = g.get_repo('''huggingface/transformers''' ) lowerCAmelCase_ : Any = repo.get_issues(state='''open''' ) for issue in open_issues: lowerCAmelCase_ : Tuple = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase_ : i.created_at , reverse=snake_case__ ) lowerCAmelCase_ : int = comments[0] if len(snake_case__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
706
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Optional[int] =logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] ={ """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""", } class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese""" def __init__( self , __lowercase=3_2_0_0_0 , __lowercase=2_5_6_0 , __lowercase=3_2 , __lowercase=3_2 , __lowercase=4 , __lowercase="gelu" , __lowercase=1.00 , __lowercase=1_0_0_0_0 , __lowercase=2_0_4_8 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=3_1_9_9_6 , __lowercase=3_1_9_9_9 , __lowercase=0.1 , __lowercase=0.0 , **__lowercase , ) -> str: super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) lowerCAmelCase_ : Optional[Any] = vocab_size lowerCAmelCase_ : Tuple = max_position_embeddings lowerCAmelCase_ : Optional[Any] = hidden_size lowerCAmelCase_ : Optional[Any] = num_hidden_layers lowerCAmelCase_ : str = num_attention_heads lowerCAmelCase_ : str = intermediate_multiple_size lowerCAmelCase_ : str = hidden_act lowerCAmelCase_ : Dict = rotary_pct lowerCAmelCase_ : Union[str, Any] = rotary_emb_base lowerCAmelCase_ : int = initializer_range lowerCAmelCase_ : Any = layer_norm_eps lowerCAmelCase_ : Optional[Any] = use_cache lowerCAmelCase_ : Tuple = attention_dropout lowerCAmelCase_ : Dict = hidden_dropout
619
0
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria used during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the full sequence reaches `max_length` tokens.

    If `max_position_embeddings` is given, a one-time warning is emitted when
    generation is about to exceed the model's positional-embedding capacity.
    """

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops once `start_length + max_new_tokens` tokens exist.

    Prefer `MaxLengthCriteria(max_length=start_length + max_new_tokens)`.
    """

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation after `max_time` seconds of wall-clock time.

    By default timing starts at construction; pass `initial_timestamp` to
    account for time spent before the criteria object was created.
    """

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of criteria; generation stops as soon as any one is satisfied."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        # Return the max length enforced by any length-based criterion, if present.
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Reconcile a criteria list with a caller-supplied `max_length`.

    Warns if the list already enforces a *different* max length; otherwise, if
    no length criterion exists, appends a `MaxLengthCriteria`. Returns a deep
    copy so the caller's list is never mutated.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
707
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class MaskFormerModelTester:
    """Builds tiny MaskFormer configs/inputs and shared shape checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        # Random pixel inputs plus binary mask/class labels on the test device.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
        comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
        )
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
619
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: keys are submodules, values the public names they export.
# Backend-specific entries are only added when the backend is importable.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Mirror the lazy structure above for static type checkers.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    """Task template mapping a dataset's columns to the ASR input/label schema."""

    # `task` keeps its default in asdict() output so serialized templates are self-describing.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's
        actual `Audio` feature (e.g. preserving its sampling rate).

        Raises:
            ValueError: if `self.audio_column` is missing or is not an `Audio` feature.
        """
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # Frozen dataclass: bypass __setattr__ to install the aligned schema on the copy.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Maps dataset column names to the canonical template column names.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
0