"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
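# A minimal illustration (not taken from the class above; the ids assume
# roberta-base's usual bos=0 and eos=sep=2) of the special-token layout that
# build_inputs_with_special_tokens produces for single sequences and pairs:
def _roberta_pair_layout(ids_a, ids_b=None, bos=0, eos=2):
    out = [bos] + ids_a + [eos]  # <s> A </s>
    if ids_b is None:
        return out
    return out + [eos] + ids_b + [eos]  # <s> A </s></s> B </s>

assert _roberta_pair_layout([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]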
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim: int,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
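# A standalone shape sketch of the "extra context tokens" path in forward()
# above; all dimensions here are assumed for illustration, not taken from any
# particular checkpoint.
if __name__ == "__main__":
    batch, clip_dim, cross_dim, n_extra, seq = 2, 768, 1280, 4, 77
    image_embeddings = torch.randn(batch, clip_dim)
    proj = nn.Linear(clip_dim, n_extra * cross_dim)
    # (batch, clip_dim) -> (batch, n_extra, cross_dim)
    extra = proj(image_embeddings).reshape(batch, -1, n_extra).permute(0, 2, 1)
    text_states = torch.randn(batch, seq, cross_dim)
    assert torch.cat([extra, text_states], dim=1).shape == (batch, n_extra + seq, cross_dim)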
from __future__ import annotations


def encode(plain: str) -> list[int]:
    '''simple docstring'''
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    '''simple docstring'''
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    '''simple docstring'''
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
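# Round-trip sketch for the a1z26 cipher above (letters map to 1..26):
# encode("abc") -> [1, 2, 3]; decode([1, 2, 3]) -> "abc"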
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__A = logging.get_logger(__name__)
def rename_key(key: str) -> str:
    """simple docstring"""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """simple docstring"""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """simple docstring"""
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
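# Illustration of the conventions handled above (keys are hypothetical):
# rename_key("down_blocks.0.attn.weight") -> "down_blocks_0.attn.weight",
# a 2-D "weight" tensor is transposed and stored under a "kernel" key, and a
# 4-D conv "weight" is transposed from (out, in, h, w) to (h, w, in, out).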
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = RetriBertTokenizer
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]="[UNK]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : List[str]="[PAD]" , UpperCAmelCase_ : Optional[Any]="[CLS]" , UpperCAmelCase_ : Optional[Any]="[MASK]" , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : str , ) ->List[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars
):
lowerCamelCase__: Dict =getattr(UpperCAmelCase_ , normalizer_state.pop("type"))
lowerCamelCase__: int =do_lower_case
lowerCamelCase__: int =strip_accents
lowerCamelCase__: List[str] =tokenize_chinese_chars
lowerCamelCase__: Tuple =normalizer_class(**UpperCAmelCase_)
lowerCamelCase__: Any =do_lower_case
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=None) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Tuple =[self.sep_token_id]
lowerCamelCase__: Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
lowerCamelCase__: Tuple =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
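# Sketch of the BERT-style layouts the two methods above produce, with
# hypothetical ids cls=101 and sep=102:
#   build_inputs_with_special_tokens([5, 6], [7]) -> [101, 5, 6, 102, 7, 102]
#   token type ids: zeros over "[CLS] A [SEP]", ones over "B [SEP]"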
'''simple docstring'''


def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
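# Small worked case for solution() above: 5 pence can be made 4 ways
# (1+1+1+1+1, 1+1+1+2, 1+2+2, 5), so solution(5) == 4.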
'''simple docstring'''
# Imports
import numpy as np
class a__ :
"""simple docstring"""
def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ):
self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase )
def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ):
if red is not None:
__lowerCAmelCase = red
if green is not None:
__lowerCAmelCase = green
if blue is not None:
__lowerCAmelCase = blue
if red_edge is not None:
__lowerCAmelCase = red_edge
if nir is not None:
__lowerCAmelCase = nir
return True
def _snake_case (self , __lowercase="" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ):
self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase )
__lowerCAmelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def _snake_case (self ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case (self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case (self ):
return self.nir * (self.red / (self.green**2))
def _snake_case (self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case (self ):
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case (self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case (self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case (self ):
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case (self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case (self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case (self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case (self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case (self , __lowercase=0.0_8 , __lowercase=1.2_2 , __lowercase=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case (self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case (self ):
return (self.nir / self.green) - 1
def _snake_case (self ):
return (self.nir / self.redEdge) - 1
def _snake_case (self ):
return (self.red - self.blue) / self.red
def _snake_case (self ):
__lowerCAmelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case (self ):
return self.nir - self.green
def _snake_case (self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case (self ):
__lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case (self , __lowercase=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case (self , __lowercase=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case (self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case (self , __lowercase=None , __lowercase=None ):
return (self.nir - b) / (a * self.red)
def _snake_case (self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case (self ):
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case (self ):
return self.nir / self.red
def _snake_case (self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case (self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case (self ):
return self.green / (self.nir + self.red + self.green)
def _snake_case (self ):
return self.nir / (self.nir + self.red + self.green)
def _snake_case (self ):
return self.red / (self.nir + self.red + self.green)
def _snake_case (self ):
return (self.green - self.red) / (self.green + self.red)
def _snake_case (self ):
return (self.red - self.green) / (self.red + self.green)
def _snake_case (self ):
__lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case (self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case (self ):
return self.nir / self.red
def _snake_case (self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case (self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
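# Standalone NDVI check, independent of the class above (values illustrative):
# with nir = np.array([0.6, 0.7]) and red = np.array([0.1, 0.2]),
# (nir - red) / (nir + red) evaluates to array([0.71428571, 0.55555556]).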
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCamelCase_ = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def __magic_name__ ( __a : Union[str, Any] , __a : Any , __a : Union[str, Any]=None ):
'''simple docstring'''
if rng is None:
UpperCamelCase__ = random.Random()
UpperCamelCase__ = 1
for dim in shape:
total_dims *= dim
UpperCamelCase__ = []
for _ in range(__a ):
values.append(rng.randint(0 , vocab_size - 1 ) )
UpperCamelCase__ = np.array(__a , dtype=jnp.intaa ).reshape(__a )
return output
def __magic_name__ ( __a : Dict , __a : Tuple=None ):
'''simple docstring'''
UpperCamelCase__ = ids_tensor(__a , vocab_size=2 , rng=__a )
# make sure that at least one token is attended to for each batch
UpperCamelCase__ = 1
return attn_mask
@require_flax
class __A:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = ()
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
UpperCamelCase__ = 2
UpperCamelCase__ = inputs["""input_ids"""].shape[-1] // 2
UpperCamelCase__ = inputs["""input_ids"""][:max_batch_size, :sequence_length]
UpperCamelCase__ = jnp.ones_like(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCamelCase__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCamelCase__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
UpperCamelCase__ = 0
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = pt_model_class(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase__ = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , flax_model.params )
UpperCamelCase__ = flax_model.generate(SCREAMING_SNAKE_CASE_ ).sequences
UpperCamelCase__ = pt_model.generate(torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCamelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = True
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
UpperCamelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = False
UpperCamelCase__ = max_length
UpperCamelCase__ = 2
UpperCamelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = True
UpperCamelCase__ = max_length
UpperCamelCase__ = 0.8
UpperCamelCase__ = 10
UpperCamelCase__ = 0.3
UpperCamelCase__ = 1
UpperCamelCase__ = 8
UpperCamelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = max_length
UpperCamelCase__ = 1
UpperCamelCase__ = 8
UpperCamelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
UpperCamelCase__ = max_length
UpperCamelCase__ = 2
UpperCamelCase__ = 1
UpperCamelCase__ = 8
UpperCamelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase__ = False
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase__ = True
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase__ = 2
UpperCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jit(model.generate )
UpperCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
UpperCamelCase__ = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
UpperCamelCase__ = """Hello world"""
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , """do_samples""" ):
model.generate(SCREAMING_SNAKE_CASE_ , do_samples=SCREAMING_SNAKE_CASE_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , """foo""" ):
UpperCamelCase__ = {"""foo""": """bar"""}
model.generate(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["""LayoutLMv3FeatureExtractor"""]
_SCREAMING_SNAKE_CASE : List[str] = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
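# How the lazy structure above behaves at runtime (illustrative): attribute
# access such as `module.LayoutLMv3Config` triggers the deferred import of the
# corresponding submodule via _LazyModule, so nothing heavy loads up front.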
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class TransfoXLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.')
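# Illustration of the cutoff / projection-sharing logic in __init__ above:
# with cutoffs=[20000, 40000, 200000] and proj_share_all_but_first=True,
# tie_projs == [False, True, True, True] (first cluster unshared, rest tied).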
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
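# A minimal concrete subclass sketch (all names hypothetical; the parser
# object passed in is assumed to be a subparsers action):
# class HelloCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser):
#         sub = parser.add_parser("hello")
#         sub.set_defaults(func=lambda args: HelloCommand())
#
#     def run(self):
#         print("hello")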
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'])
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.'):]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
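# Example invocation (script name and paths illustrative):
# python convert_roberta_prelayernorm_checkpoint.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta_prelayernorm_dump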
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] ):
__lowercase = tempfile.mkdtemp()
# fmt: off
__lowercase = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__lowercase = dict(zip(UpperCAmelCase__, range(len(UpperCAmelCase__ ) ) ) )
__lowercase = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__lowercase = {"unk_token": "<unk>"}
__lowercase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase__ ) )
__lowercase = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
__lowercase = os.path.join(self.tmpdirname, UpperCAmelCase__ )
with open(self.image_processor_file, "w", encoding="utf-8" ) as fp:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
def _lowercase ( self : Optional[int], **UpperCAmelCase__ : Tuple ):
return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **UpperCAmelCase__ )
def _lowercase ( self : str, **UpperCAmelCase__ : Any ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **UpperCAmelCase__ )
def _lowercase ( self : str, **UpperCAmelCase__ : Any ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **UpperCAmelCase__ )
def _lowercase ( self : int ):
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Optional[int] ):
__lowercase = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(UpperCAmelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : str ):
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = self.get_image_processor()
__lowercase = OwlViTProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__lowercase = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=UpperCAmelCase__ )
__lowercase = OwlViTProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__lowercase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, UpperCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer, UpperCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, UpperCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor, UpperCAmelCase__ )
def _lowercase ( self : Dict ):
__lowercase = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)" )
__lowercase = self.get_image_processor(do_normalize=UpperCAmelCase__ )
__lowercase = OwlViTProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=UpperCAmelCase__ )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, UpperCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = OwlViTProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(UpperCAmelCase__, return_tensors="np" )
__lowercase = processor(images=UpperCAmelCase__, return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def _lowercase ( self : Optional[Any] ):
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = OwlViTProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
__lowercase = "lower newer"
__lowercase = processor(text=UpperCAmelCase__, return_tensors="np" )
__lowercase = tokenizer(UpperCAmelCase__, return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist() )
def _lowercase ( self : Any ):
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = OwlViTProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
__lowercase = "lower newer"
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=UpperCAmelCase__, images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ), ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
def _lowercase ( self : Optional[Any] ):
__lowercase = "google/owlvit-base-patch32"
__lowercase = OwlViTProcessor.from_pretrained(UpperCAmelCase__ )
__lowercase = ["cat", "nasa badge"]
__lowercase = processor(text=UpperCAmelCase__ )
__lowercase = 1_6
self.assertListEqual(list(inputs.keys() ), ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape, (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
def _lowercase ( self : List[str] ):
__lowercase = "google/owlvit-base-patch32"
__lowercase = OwlViTProcessor.from_pretrained(UpperCAmelCase__ )
__lowercase = [["cat", "nasa badge"], ["person"]]
__lowercase = processor(text=UpperCAmelCase__ )
__lowercase = 1_6
__lowercase = len(UpperCAmelCase__ )
__lowercase = max([len(UpperCAmelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ), ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
def _lowercase ( self : Dict ):
__lowercase = "google/owlvit-base-patch32"
__lowercase = OwlViTProcessor.from_pretrained(UpperCAmelCase__ )
__lowercase = ["cat", "nasa badge"]
__lowercase = processor(text=UpperCAmelCase__ )
__lowercase = 1_6
__lowercase = inputs["input_ids"]
__lowercase = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ), ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape, (2, seq_length) )
self.assertListEqual(list(input_ids[0] ), predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ), predicted_ids[1] )
def _lowercase ( self : List[str] ):
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = OwlViTProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
__lowercase = self.prepare_image_inputs()
__lowercase = self.prepare_image_inputs()
__lowercase = processor(images=UpperCAmelCase__, query_images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ), ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
def _lowercase ( self : List[Any] ):
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = OwlViTProcessor(tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
__lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase = processor.batch_decode(UpperCAmelCase__ )
__lowercase = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _A ( ) -> None:
'''simple docstring'''
print("Making key files...")
make_key_files("rsa", 1024)
print("Key files generation successful.")
def _A ( UpperCamelCase_ : int) -> tuple[tuple[int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p...")
__lowercase = rabinMiller.generate_large_prime(UpperCamelCase_)
print("Generating prime q...")
__lowercase = rabinMiller.generate_large_prime(UpperCamelCase_)
__lowercase = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
while True:
__lowercase = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
if cryptoMath.gcd(UpperCamelCase_, (p - 1) * (q - 1)) == 1:
break
print("Calculating d that is mod inverse of e...")
__lowercase = cryptoMath.find_mod_inverse(UpperCamelCase_, (p - 1) * (q - 1))
__lowercase = (n, e)
__lowercase = (n, d)
return (public_key, private_key)
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : int) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""") or os.path.exists(F"""{name}_privkey.txt"""):
print("\nWARNING:")
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program.")
sys.exit()
__lowercase ,__lowercase = generate_key(UpperCamelCase_)
print(F"""\nWriting public key to file {name}_pubkey.txt...""")
with open(F"""{name}_pubkey.txt""", "w") as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""")
print(F"""Writing private key to file {name}_privkey.txt...""")
with open(F"""{name}_privkey.txt""", "w") as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""")
if __name__ == "__main__":
main()
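# Sanity check of the RSA relationship used above, with the textbook primes
# p=61, q=53 (independent of the generated key files):
# n = 61 * 53 = 3233, phi = 3120, e = 17, d = 2753, and
# pow(pow(42, 17, 3233), 2753, 3233) == 42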
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    """simple docstring"""
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find('patch')
    patch_size = int(model_name[start_idx + len('patch') : start_idx + len('patch') + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    """Translate an original X-CLIP parameter name into its Hugging Face equivalent."""
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rename keys and split fused q/k/v attention projections to match the HF model."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
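# --- Illustrative check (not part of the original script): the slicing above
# splits a fused attention projection of shape (3 * dim, dim) into equal
# q/k/v blocks, each of shape (dim, dim).
def _qkv_split_demo() -> None:
    dim = 4
    in_proj_weight = torch.randn(3 * dim, dim)
    q = in_proj_weight[:dim, :]
    k = in_proj_weight[dim : dim * 2, :]
    v = in_proj_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)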
def prepare_video(num_frames):
    """Load a short 'eating spaghetti' clip with the requested frame count."""
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset")
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original X-CLIP checkpoint into the Hugging Face format."""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True)

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
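# --- Example invocation (illustrative; the file name below is whatever this
# script is saved as, the flags come from the argparse setup above):
#
#   python convert_x_clip_checkpoint.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub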
| 150 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

            # No fast custom tokenizer
            tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, the output must still be made of correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 174 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Each flattened patch holds its pixel values plus 2 position slots.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # In VQA mode a header text is required, so calling without one must fail.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # RGBA inputs are converted to RGB, so one channel is dropped.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
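# --- Illustrative check (not part of the original tests): where the
# `expected_hidden_dim` used above comes from. Each flattened patch carries
# patch_height * patch_width * num_channels pixel values plus 2 slots for the
# patch's (row, column) position.
def _expected_hidden_dim_demo() -> None:
    patch_height, patch_width, num_channels = 16, 16, 3
    expected_hidden_dim = patch_height * patch_width * num_channels + 2
    assert expected_hidden_dim == 770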
| 174 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(decoded, expected)
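# --- Illustrative sketch (not part of the original tests): how the toy merges
# in setUp() tokenize "lower". BPE starts from characters and repeatedly
# applies learned merges; here the merge ("e", "r") -> "er" is the one that
# fires, giving the ["l", "o", "w", "er"] pieces asserted above.
def _toy_bpe_demo() -> None:
    word = ["l", "o", "w", "e", "r"]
    merges = [("e", "r")]
    for a, b in merges:
        i, merged = 0, []
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (a, b):
                merged.append(a + b)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    assert word == ["l", "o", "w", "er"]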
| 215 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
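# --- Illustrative sketch (not part of the original file): the _LazyModule
# indirection is roughly a module-level __getattr__ (PEP 562) that imports a
# submodule only when one of its names is first accessed, so importing the
# package stays cheap:
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module(f".{submodule}", __name__), name)
#       raise AttributeError(name)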
| 215 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
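# --- Illustrative sketch (not part of the original tests): why the expected
# ids above come out as they do. The offset of 4 is an assumption here,
# reflecting ids 0-3 being reserved for specials (<s>, <pad>, </s>, <unk>);
# pieces outside the 5-entry monolingual vocab fall back to <unk> (id 3).
def _monolingual_lookup_demo() -> None:
    fairseq_offset = 4
    vocab = {tok: i + fairseq_offset for i, tok in enumerate(["▁This", "▁is", "▁a", "▁t", "est"])}
    unk_id = 3
    pieces = ["▁This", "▁is", "▁a", "▁l", "à", "▁t", "est", "<unk>"]
    assert [vocab.get(p, unk_id) for p in pieces] == [4, 5, 6, 3, 3, 7, 8, 3]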
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
    ```py
    >>> import torch
    >>> import numpy as np

    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
    >>> from transformers import pipeline
    >>> from diffusers.utils import load_image


    >>> def make_hint(image, depth_estimator):
    ...     image = depth_estimator(image)["depth"]
    ...     image = np.array(image)
    ...     image = image[:, :, None]
    ...     image = np.concatenate([image, image, image], axis=2)
    ...     detected_map = torch.from_numpy(image).float() / 255.0
    ...     hint = detected_map.permute(2, 0, 1)
    ...     return hint


    >>> depth_estimator = pipeline("depth-estimation")

    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ... )
    >>> pipe_prior = pipe_prior.to("cuda")

    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
    ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
    ... )
    >>> pipe = pipe.to("cuda")


    >>> img = load_image(
    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    ...     "/kandinsky/cat.png"
    ... ).resize((768, 768))

    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

    >>> prompt = "A robot, 4k photo"
    >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

    >>> generator = torch.Generator(device="cuda").manual_seed(43)

    >>> image_emb, zero_image_emb = pipe_prior(
    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
    ... ).to_tuple()

    >>> images = pipe(
    ...     image_embeds=image_emb,
    ...     negative_image_embeds=zero_image_emb,
    ...     hint=hint,
    ...     num_inference_steps=50,
    ...     generator=generator,
    ...     height=768,
    ...     width=768,
    ... ).images

    >>> images[0].save("robot_cat.png")
    ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Map a requested pixel size to the latent grid size used by the MoVQ decoder."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
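# --- Illustrative check (not part of the original file): the helper divides
# each side by scale_factor**2 (= 64 by default), rounds up, then multiplies
# back by scale_factor, so odd pixel sizes still land on a valid latent grid.
def _downscale_demo() -> None:
    assert downscale_height_and_width(512, 512) == (64, 64)
    assert downscale_height_and_width(500, 768) == (64, 96)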
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Image generation with Kandinsky 2.2, conditioned on image embeddings and a ControlNet hint."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"""cuda:{gpu_id}""")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"""cuda:{gpu_id}""")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
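# --- Illustrative check (not part of the original pipeline): the
# classifier-free-guidance update used in the denoising loop above, on dummy
# tensors. guidance_scale > 1 pushes the prediction away from the
# unconditional branch toward the conditioned one.
def _cfg_demo() -> None:
    guidance_scale = 4.0
    noise_pred_uncond = torch.zeros(1, 4, 8, 8)
    noise_pred_text = torch.ones(1, 4, 8, 8)
    guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    assert torch.allclose(guided, torch.full((1, 4, 8, 8), 4.0))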
| 32 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 32 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
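
# Fetch job links, artifact links and test reports from a GitHub Actions
# workflow run of huggingface/transformers, then aggregate the failures per
# error and per model into JSON files and Markdown tables.
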
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact from a URL.

    The URL can't be used to download directly: GitHub first answers with a
    redirect, so we fetch the redirect target and download from there.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files (in .zip format) in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error, with the tests where it shows up."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub-flavoured Markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model counts as a GitHub-flavoured Markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
A : Optional[Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A : List[Any] = get_job_links(args.workflow_run_id, token=args.token)
A : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A : int = k.find(''' / ''')
A : str = k[index + len(''' / ''') :]
A : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A : Tuple = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A : Union[str, Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A : str = reduce_by_error(errors)
A : Dict = reduce_by_model(errors)
A : Union[str, Any] = make_github_table(reduced_by_error)
A : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 276 |
import argparse
import torch
from transformers import BertForMaskedLM
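
# Extract a subset of a teacher BERT's layers into a state dict that a
# DistilBERT-style student can load, for transfer-learned distillation.
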
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Student keys follow DistilBERT's parameter naming (q_lin, k_lin, ffn.lin1, ...).
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 276 | 1 |
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Sum of the proper divisors of `n` (i.e. excluding `n` itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Project Euler 21: return the sum of all amicable numbers under `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
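
# Example: solution(10000) == 31626, the sum of all amicable numbers below 10000.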
| 103 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
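
# Builds a tiny random OpenLlama config plus dummy inputs; consumed by the
# model tests below via `prepare_config_and_inputs`.
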
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
def lowercase_ ( self : Any)-> Tuple:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)])
def lowercase_ ( self : Any , UpperCamelCase__ : List[str])-> Dict:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Any = ids_tensor([1, 1_0] , config.vocab_size)
__lowerCAmelCase: Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase: List[Any] = OpenLlamaModel(UpperCamelCase__)
original_model.to(UpperCamelCase__)
original_model.eval()
__lowerCAmelCase: int = original_model(UpperCamelCase__).last_hidden_state
__lowerCAmelCase: str = original_model(UpperCamelCase__).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase: Dict = {"type": scaling_type, "factor": 10.0}
__lowerCAmelCase: List[str] = OpenLlamaModel(UpperCamelCase__)
scaled_model.to(UpperCamelCase__)
scaled_model.eval()
__lowerCAmelCase: Dict = scaled_model(UpperCamelCase__).last_hidden_state
__lowerCAmelCase: Any = scaled_model(UpperCamelCase__).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
else:
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
| 217 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 126 | """simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
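
# The unit tests below run against a tiny local SentencePiece fixture; the
# slow integration tests further down hit the released MBart-50 checkpoints.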
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
_lowerCAmelCase : Any = {"""input_ids""": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCAmelCase,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 126 | 1 |
# Korean install cell used for the ko docs notebooks. The comments read:
# "How to install Transformers" and "To install from source instead of the
# last release, comment out the command above and uncomment the one below."
INSTALL_CONTENT = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 63 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
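
# Checks that `accelerator.gather_for_metrics` returns exactly the samples of
# the underlying dataset (dropping the duplicates added to pad the last
# batch), both for a toy regression model and for an MRPC text classifier.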
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            references = batch["labels"]
            preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 9 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
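
# Lazy import structure: submodules are only materialized on first attribute
# access, and the torch-only modeling classes are registered only when torch
# is installed.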
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 291 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 291 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
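
# iPNDM keeps a running window of the last four model outputs (`self.ets`)
# and combines them with linear-multistep (Adams-Bashforth-style)
# coefficients to take each sampling step.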
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth-style) combination of the running values
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
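# A minimal usage sketch (added for illustration, not part of the original file;
# `model` is a hypothetical denoiser returning a tensor shaped like `sample`):
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=50)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample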
| 118 |
'''simple docstring'''
def jaro_winkler(str_1: str, str_2: str) -> float:
    """simple docstring"""

    def get_matched_characters(_str_1: str, _str_2: str) -> str:
        matched = []
        limit = min(len(_str_1), len(_str_2)) // 2
        for i, l in enumerate(_str_1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_2)))
            if l in _str_2[left:right]:
                matched.append(l)
                # blank out the matched character so it is not matched twice
                _str_2 = f"{_str_2[0:_str_2.index(l)]} {_str_2[_str_2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str_1, str_2)
    matching_2 = get_matched_characters(str_2, str_1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c_1, c_2) for c_1, c_2 in zip(matching_1, matching_2) if c_1 != c_2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_1)
                + match_count / len(str_2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c_1, c_2 in zip(str_1[:4], str_2[:4]):
        if c_1 == c_2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
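    # Worked example (a classic Jaro-Winkler pair, added for illustration):
    # "martha" vs "marhta" has 6 matches and 1 transposition, so
    # jaro = (6/6 + 6/6 + 5/6) / 3 ~= 0.9444, and the shared 3-character prefix
    # lifts the score to 0.9444 + 0.1 * 3 * (1 - 0.9444) ~= 0.9611.
    print(jaro_winkler("martha", "marhta"))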
| 258 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze(module) -> None:
    # disable gradient tracking for every parameter of the module
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations.")
    return device


def show_pil_image(img) -> None:
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
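# Minimal usage sketch (helper names as reconstructed above; `model` is a
# hypothetical torch.nn.Module):
#     device = get_device()
#     freeze(model)           # disable gradients before inference
#     print(get_timestamp())  # e.g. "14:03:22"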
| 368 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        """simple docstring"""
        super().setUp()

        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        """simple docstring"""
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        """simple docstring"""
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        """simple docstring"""
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        ids = tokenizer.encode(input_text)
        output_text = tokenizer.decode(ids)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        x_string_1 = tokenizer.decode(x_token_1)
        x_string_2 = tokenizer.decode(x_token_2)
        x_string_3 = tokenizer.decode(x_token_3)
        self.assertEqual(x_string_1, expected_text)
        self.assertEqual(x_string_2, expected_text)
        self.assertEqual(x_string_3, expected_text)
    @slow
    def test_token_type_ids(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        """simple docstring"""
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        """simple docstring"""
        # tokenizer has no padding token
        pass
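# To run just this module in a transformers checkout (the path is an assumption
# based on the usual repository layout):
#     python -m pytest tests/models/gptsan_japanese/test_tokenization_gptsan_japanese.py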
| 301 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_70)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path(model_type, use_small=False) -> str:
    """simple docstring"""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name) -> None:
    """simple docstring"""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """simple docstring"""
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """simple docstring"""
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """simple docstring"""
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
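    # Example invocation (script name and output path are hypothetical):
    #     python convert_suno_to_hf.py text ./bark-text-hf --is_small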
| 324 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "do_rescale"))
        self.assertTrue(hasattr(image_processor, "do_pad"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """simple docstring"""
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """simple docstring"""
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 324 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 350 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """simple docstring"""
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # slide the window right: drop array[i], add array[i + k]
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
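    # The sliding window keeps the running sum in O(1) per step, so the whole
    # scan is O(n) instead of the O(n * k) cost of re-summing every window, e.g.:
    #     max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)  # -> 24, window [3, 1, 0, 20]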
| 145 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """simple docstring"""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring so the last node points back at the first
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    """simple docstring"""

    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
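    # Usage sketch: the queue reuses a fixed ring of nodes instead of allocating
    # per element, so enqueue/dequeue are O(1) with no allocation churn.
    #     queue = CircularQueueLinkedList(initial_capacity=3)
    #     queue.enqueue('a'); queue.enqueue('b')
    #     queue.dequeue()  # -> 'a'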
| 40 |
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict) -> None:
    """simple docstring"""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict) -> None:
    """simple docstring"""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers', 'layers')] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace('subsample', 'conv')] = s_dict.pop(key)


def make_linear_from_emb(emb) -> nn.Linear:
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path) -> None:
    """simple docstring"""
    m2m_100 = torch.load(checkpoint_path, map_location='cpu')
    args = m2m_100['args']
    state_dict = m2m_100['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='relu',
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}')

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
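    # Sample usage (checkpoint and output paths are hypothetical):
    #     python <this_script>.py --fairseq_path ./s2t.pt --pytorch_dump_folder_path ./s2t-hf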
| 222 | 0 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == ' ':
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces')
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {'A', 'B', ' '} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces')
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            # each plaintext letter is a 5-symbol block of A/B
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += ' '
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
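    # Round-trip sketch (cipher string computed from the table above):
    #     cipher = encode('hello world')
    #     # 'AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB'
    #     decode(cipher)  # -> 'hello world'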
| 367 |
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
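    # relu is applied elementwise, so it works on any array shape, e.g.:
    #     relu(np.array([[-2.0, 3.5], [0.0, -0.1]]))  # -> [[0., 3.5], [0., 0.]]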
| 258 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """simple docstring"""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
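    # Note: `explored` is a set, so print order is arbitrary; the traversal
    # itself runs in O(V + E). A deterministic sanity check:
    print(sorted(depth_first_search(G, 'A')))  # ['A', 'B', 'C', 'D', 'E', 'F', 'G']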
| 153 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config)

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized')

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 138 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__magic_name__: Any = logging.get_logger(__name__)
__magic_name__: Dict = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=50_265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2_048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2_048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])

            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ])

        return common_inputs
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Any = super().outputs
else:
__magic_name__ : int = super(lowerCAmelCase__ , self ).outputs
if self.use_past:
__magic_name__ ,__magic_name__ : str = self.num_layers
for i in range(lowerCAmelCase__ ):
__magic_name__ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
__magic_name__ : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
__magic_name__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Generate decoder inputs
__magic_name__ : Optional[int] = seq_length if not self.use_past else 1
__magic_name__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Optional[Any] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__magic_name__ : Dict = dict(**lowerCAmelCase__ , **lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ ,__magic_name__ : List[Any] = common_inputs["""input_ids"""].shape
__magic_name__ : Optional[Any] = common_inputs["""decoder_input_ids"""].shape[1]
__magic_name__ ,__magic_name__ : str = self.num_attention_heads
__magic_name__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : Any = decoder_seq_length + 3
__magic_name__ : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__magic_name__ : List[str] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ )] , dim=1 )
__magic_name__ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__magic_name__ ,__magic_name__ : List[str] = self.num_layers
__magic_name__ : Optional[Any] = min(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[str] = max(lowerCAmelCase__ , lowerCAmelCase__ ) - min_num_layers
__magic_name__ : Tuple = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowerCAmelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
) )
# TODO: test this.
__magic_name__ : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowerCAmelCase__ , lowerCAmelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) )
return common_inputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
__magic_name__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ ,__magic_name__ : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__magic_name__ : List[Any] = seqlen + 2
__magic_name__ ,__magic_name__ : Any = self.num_layers
__magic_name__ ,__magic_name__ : int = self.num_attention_heads
__magic_name__ : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : Optional[int] = common_inputs["""attention_mask"""].dtype
__magic_name__ : Optional[Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
__magic_name__ : Tuple = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(lowerCAmelCase__ )
]
return common_inputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__magic_name__ : Tuple = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__magic_name__ : str = tokenizer.num_special_tokens_to_add(lowerCAmelCase__ )
__magic_name__ : List[str] = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
__magic_name__ : List[Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__magic_name__ : List[str] = dict(tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ ) )
return common_inputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
elif self.task == "causal-lm":
__magic_name__ : str = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
else:
__magic_name__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
return common_inputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : List[Any] = super()._flatten_past_key_values_(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
__magic_name__ : Tuple = super(lowerCAmelCase__ , self )._flatten_past_key_values_(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
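
# Illustrative usage (a sketch, not part of the original module): the dummy inputs
# generated above are what the ONNX exporter traces the model with.
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     # `dummy` now holds input_ids / attention_mask / decoder_* tensors whose dynamic
#     # axes are the ones declared in the `inputs` property.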
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
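
# Illustrative usage (a sketch; the module path is hypothetical): after a model has
# been materialized with `init_empty_weights`, each checkpoint tensor can be pushed
# onto its target device — quantizing it on the fly when the destination parameter is
# a bitsandbytes Int8Params/Params4bit.
#
#     set_module_quantized_tensor_to_device(
#         model, "transformer.h.0.attn.c_attn.weight", device=0, value=loaded_tensor
#     )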
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
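
# Illustrative usage (a sketch, not part of the original module): swap every
# `nn.Linear` except the LM head for a bitsandbytes 8-bit layer. `BitsAndBytesConfig`
# is the standard transformers quantization config; `model` is assumed to be any
# loaded `PreTrainedModel`.
#
#     from transformers import BitsAndBytesConfig
#     bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = replace_with_bnb_linear(model, quantization_config=bnb_config)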
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then
    # check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names match the model's input names."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a list of `InputExample`s into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
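
# Illustrative usage with a concrete subclass (a sketch; the task class and labels are
# hypothetical, NER-style):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     example = InputExample(
#         guid="train-1", words=["Hugging", "Face", "is", "great"],
#         labels=["B-ORG", "I-ORG", "O", "O"],
#     )
#     features = TokenClassificationTask.convert_examples_to_features(
#         [example], ["O", "B-ORG", "I-ORG"], 32, tokenizer,
#         cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token,
#         pad_token=tokenizer.pad_token_id,
#     )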
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        # xlnet has a cls token at the end
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        # roberta uses an extra separator b/w pairs of sentences
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                # xlnet has a cls token at the end
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                # roberta uses an extra separator b/w pairs of sentences
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Gets a set of train, valid, and test dataloaders for a particular fold.

    Args:
        accelerator (`Accelerator`): The main `Accelerator` object.
        dataset (`DatasetDict`): The dataset to be split into folds.
        train_idxs (list of `int`): The split indices for the training dataset.
        valid_idxs (list of `int`): The split indices for the validation dataset.
        batch_size (`int`): The size of the minibatch. Default is 16.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
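
# Illustrative invocation (a sketch; assumes `accelerate config` has been run once and
# that this script is saved as cross_validation.py):
#
#     accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16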
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LLaMA model."""

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
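
# Illustrative usage (a sketch, not part of the original module): doubling the context
# window with linear RoPE scaling. Note that `factor` must be a float strictly greater
# than 1, otherwise `_rope_scaling_validation` above raises a ValueError — so pass 2.0,
# not the integer 2.
#
#     config = LlamaConfig(max_position_embeddings=2048, rope_scaling={"type": "linear", "factor": 2.0})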
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two vectors (numpy arrays or torch tensors)."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def spherical_dist_loss(x, y):
    # squared great-circle distance between x and y after projection onto the unit hypersphere
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
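
# Quick sanity check for `slerp` (illustrative, not part of the original module):
# interpolating between orthogonal unit vectors at t=0.5 should return a unit vector
# at 45 degrees to both inputs.
#
#     v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
#     slerp(0.5, v0, v1)  # ~ array([0.70710678, 0.70710678])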
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def UpperCamelCase ( self, lowerCamelCase = "auto") -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : str = min(int(num_inference_steps * strength), lowerCamelCase)
_lowercase : List[Any] = max(num_inference_steps - init_timestep, 0)
_lowercase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = 5_12, lowerCamelCase = 5_12, lowerCamelCase = 0.6, lowerCamelCase = 50, lowerCamelCase = 7.5, lowerCamelCase = 1, lowerCamelCase = 0.0, lowerCamelCase = 1_00, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, lowerCamelCase = 0.8, lowerCamelCase = 0.1, lowerCamelCase = 0.1, ) -> int:
"""simple docstring"""
if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(lowerCamelCase)} generators.''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if isinstance(lowerCamelCase, torch.Generator) and batch_size > 1:
_lowercase : Dict = [generator] + [None] * (batch_size - 1)
_lowercase : Optional[int] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowercase : str = ', '.join(lowerCamelCase)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : List[Any] = self.get_image_description(lowerCamelCase)
if style_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : Dict = self.get_image_description(lowerCamelCase)
# get prompt text embeddings for content and style
_lowercase : Optional[int] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_lowercase : Union[str, Any] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : List[Any] = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_lowercase : Any = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# duplicate text embeddings for each generation per prompt
_lowercase : Dict = text_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# set timesteps
_lowercase : Dict = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_offset:
_lowercase : Any = 1
self.scheduler.set_timesteps(lowerCamelCase, **lowerCamelCase)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device)
_lowercase , _lowercase : List[Any] = self.get_timesteps(lowerCamelCase, lowerCamelCase, self.device)
_lowercase : str = timesteps[:1].repeat(lowerCamelCase)
# Preprocess image
_lowercase : str = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : int = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : Optional[int] = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
if clip_guidance_scale > 0:
_lowercase : Optional[int] = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Dict = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = slerp(
lowerCamelCase, lowerCamelCase, lowerCamelCase)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowercase : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowercase : Tuple = content_text_input.input_ids.shape[-1]
_lowercase : Union[str, Any] = self.tokenizer([''], padding='max_length', max_length=lowerCamelCase, return_tensors='pt')
_lowercase : int = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_lowercase : Union[str, Any] = uncond_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowercase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowercase : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowercase : List[Any] = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='cpu', dtype=lowerCamelCase).to(
self.device)
else:
_lowercase : Any = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_lowercase : Tuple = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowercase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowercase : Dict = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_eta:
_lowercase : List[Any] = eta
# check if the scheduler accepts generator
_lowercase : Dict = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_lowercase : str = generator
with self.progress_bar(total=lowerCamelCase):
for i, t in enumerate(lowerCamelCase):
# expand the latents if we are doing classifier free guidance
_lowercase : List[str] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowercase : List[Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
_lowercase : Dict = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowercase , _lowercase : Optional[Any] = noise_pred.chunk(2)
_lowercase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowercase : Tuple = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_lowercase , _lowercase : List[Any] = self.cond_fn(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        _lowercase : Any = 1 / 0.18215 * latents
_lowercase : List[str] = self.vae.decode(lowerCamelCase).sample
_lowercase : Tuple = (image / 2 + 0.5).clamp(0, 1)
_lowercase : List[Any] = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowercase : List[Any] = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase)
| 21 |
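# Editor's sketch (assumption, not from the source): the classifier-free guidance update
# used in the pipeline above, isolated as a standalone helper. All names are illustrative.
import torch

def apply_classifier_free_guidance(noise_pred_uncond: torch.Tensor, noise_pred_text: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # guidance_scale = 1 recovers the plain text-conditional prediction;
    # larger values extrapolate away from the unconditional prediction
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)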
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228_168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 308 | 0 |
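# Editor's sketch (assumption, not from the source): the tolerance pattern the test above
# relies on, generalized into a tiny helper for comparing scalar metrics.
def assert_close(actual: float, expected: float, atol: float = 2e-4) -> None:
    # fail with both values so a drifted metric is easy to diagnose
    assert abs(actual - expected) < atol, f"expected {expected}, got {actual}"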
from __future__ import annotations
from math import pi
def ind_reactance( inductance , frequency , reactance ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
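# Editor's sketch (assumption, not from the source): a worked example of the relation
# X_L = 2*pi*f*L that the function above solves for whichever argument is zero.
from math import pi

inductance_h = 0.1   # 0.1 H coil
frequency_hz = 50.0  # mains frequency
reactance_ohm = 2 * pi * frequency_hz * inductance_h
assert abs(reactance_ohm - 31.4159) < 1e-3  # about 31.4 ohms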
| 258 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : Dict = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def convert_s3prl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    downstream_dict = checkpoint['''Downstream''']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification''' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForAudioFrameClassification''' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForXVector''' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__UpperCamelCase : Union[str, Any] = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 258 | 1 |
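# Editor's sketch (assumption, not from the source): the conversion above amounts to
# copying tensors from a flat downstream state dict into named parameters of the HF model;
# a minimal version of that idea for a single linear layer.
import torch

def copy_linear(layer: torch.nn.Linear, weight: torch.Tensor, bias: torch.Tensor) -> None:
    # assigning to .data keeps the existing Parameter objects registered on the module
    layer.weight.data = weight
    layer.bias.data = bias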
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE :Optional[Any] = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE :List[str] = {
'''squeezebert/squeezebert-uncased''': 5_12,
'''squeezebert/squeezebert-mnli''': 5_12,
'''squeezebert/squeezebert-mnli-headless''': 5_12,
}
SCREAMING_SNAKE_CASE :Any = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class __lowerCAmelCase ( a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 159 |
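# Editor's sketch (assumption, not from the source): the layout the two helpers above
# produce for a BERT-style sequence pair, with illustrative special-token ids.
cls_id, sep_id = 101, 102
tokens_a, tokens_b = [7, 8], [9]
input_ids = [cls_id] + tokens_a + [sep_id] + tokens_b + [sep_id]
token_type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
assert len(input_ids) == len(token_type_ids) == 6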
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info( auth_token: str ) -> dict[Any, Any]:
    '''simple docstring'''
    headers = {
        "Authorization": F'''token {auth_token}''',
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 159 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
    '''simple docstring'''
    model_type = "M-CLIP"
    def __init__( self , transformerDimSize=1_024 , imageDimSize=768 , **kwargs ):
        """simple docstring"""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP( PreTrainedModel ):
    '''simple docstring'''
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        """simple docstring"""
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , input_ids , attention_mask ):
        """simple docstring"""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs ), embs
| 365 |
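# Editor's sketch (assumption, not from the source): the attention-masked mean pooling
# used in MultilingualCLIP.forward above, isolated for clarity.
import torch

def masked_mean(hidden: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # hidden: (batch, seq, dim); mask: (batch, seq), 1 for real tokens and 0 for padding
    summed = (hidden * mask.unsqueeze(2)).sum(dim=1)
    counts = mask.sum(dim=1)[:, None]
    return summed / counts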
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : int = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class Blip2VisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_2_vision_model"
    def __init__( self , hidden_size=1_408 , intermediate_size=6_144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00_001 , attention_dropout=0.0 , initializer_range=1e-1_0 , qkv_bias=True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""" ) == "blip-2":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class Blip2QFormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_2_qformer"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_408 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""" ) == "blip-2":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class Blip2Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip-2"
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
        self.vision_config = Blip2VisionConfig(**vision_config )
        self.qformer_config = Blip2QFormerConfig(**qformer_config )
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 168 | 0 |
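# Editor's sketch (assumption, not from the source): assembling the composite config from
# its sub-configs, mirroring the wiring done in Blip2Config.__init__ above.
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig

vision = Blip2VisionConfig()
qformer = Blip2QFormerConfig()
composite = Blip2Config(vision_config=vision.to_dict(), qformer_config=qformer.to_dict())
# the Q-Former's cross-attention width is tied to the vision tower's hidden size
assert composite.qformer_config.encoder_hidden_size == composite.vision_config.hidden_size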
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU." )
    parser.add_argument(
        "--pretrained_model_config" , type=str , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
    parser.add_argument(
        "--tokenizer" , type=str , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
    parser.add_argument(
        "--per_replica_batch_size" , type=int , default=8 , help="Batch size per TPU core." , )
    parser.add_argument(
        "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
    parser.add_argument(
        "--tpu_name" , type=str , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
    parser.add_argument(
        "--tpu_zone" , type=str , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
    parser.add_argument(
        "--gcp_project" , type=str , help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
    parser.add_argument(
        "--train_dataset" , type=str , help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--shuffle_buffer_size" , type=int , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
    parser.add_argument(
        "--eval_dataset" , type=str , help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of epochs to train for." , )
    parser.add_argument(
        "--learning_rate" , type=float , default=1E-4 , help="Learning rate to use for training." , )
    parser.add_argument(
        "--weight_decay_rate" , type=float , default=1E-3 , help="Weight decay rate to use for training." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
    parser.add_argument(
        "--mlm_probability" , type=float , default=0.15 , help="Fraction of tokens to mask during training." , )
    parser.add_argument("--output_dir" , type=str , required=True , help="Path to save model checkpoints to." )
    parser.add_argument("--hub_model_id" , type=str , help="Model ID to upload to on the Hugging Face Hub." )
    args = parser.parse_args()
    return args
def initialize_tpu( args ):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples( file_list ):
    num_samples = 0
    for file in file_list:
        filename = file.split("/" )[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord" , filename ).group(1 )
        num_samples += int(sample_count )
    return num_samples
def prepare_dataset( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(records ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main( args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
    if not training_records:
        raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
    if not eval_records:
        raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=["accuracy"] )
    def decode_fn(example ):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors="tf" )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"] , tf.bool )
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"] , batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 63 |
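# Editor's sketch (assumption, not from the source): the shard-name convention that
# count_samples above relies on -- files named like "<prefix>-<shard>-<num_samples>.tfrecord".
import re

match = re.search(r"-\d+-(\d+)\.tfrecord", "wikitext-00042-25000.tfrecord")
assert match is not None and match.group(1) == "25000"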
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n , k ):
    result = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state(increment , total_number , level , current_list , total_list , ):
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state(total_list ):
    for i in total_list:
        print(*i )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
| 301 | 0 |
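# Editor's sketch (assumption, not from the source): the backtracking generator above
# should agree with itertools.combinations over the range 1..n.
from itertools import combinations

expected = [list(c) for c in combinations(range(1, 5), 2)]
assert expected == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]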
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig( PretrainedConfig ):
    model_type = '''switch_transformers'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=3_2128 , d_model=768 , d_kv=64 , d_ff=2048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        ) # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us after how many encoder layers we'll have to insert a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers we'll have to insert a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''')
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("""-""")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
| 354 |
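# Editor's sketch (assumption, not from the source): how the sparse step computed in the
# config above spaces MoE layers -- one sparse block every (num_layers // num_sparse) layers.
num_layers, num_sparse_encoder_layers = 12, 3
encoder_sparse_step = num_layers // num_sparse_encoder_layers  # = 4
sparse_positions = [i for i in range(1, num_layers + 1) if i % encoder_sparse_step == 0]
assert sparse_positions == [4, 8, 12]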
'''simple docstring'''
def solution( max_base = 1_0 , max_power = 2_2 ):
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
    print(f"""{solution(10, 22) = }""")
| 283 | 0 |
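# Editor's sketch (assumption, not from the source): spot checks for the counting rule
# above -- a number is counted when the n-th power has exactly n digits.
assert len(str(7**5)) == 5    # 16807 is a 5-digit fifth power, so it counts
assert len(str(8**16)) != 16  # 8**16 has only 15 digits, so it does not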
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299 |
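# Editor's sketch (assumption, not from the source): the idea behind _LazyModule above,
# written as a PEP 562 module-level __getattr__ that imports on first attribute access.
# `_SUBMODULE_BY_NAME` is a hypothetical lookup table, not part of the original file.
import importlib

_SUBMODULE_BY_NAME = {"UperNetConfig": "configuration_upernet"}

def __getattr__(name):
    submodule = importlib.import_module("." + _SUBMODULE_BY_NAME[name], __name__)
    return getattr(submodule, name)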
def infix_2_postfix(infix ):
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    } # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ), '''Stack'''.center(print_width ), '''Postfix'''.center(print_width ), sep=''' | ''', )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x ) # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x ) # if x is "(" push to Stack
        elif x == ")": # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x ) # If stack is empty, push x to stack
            else: # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() ) # pop stack & add to Postfix
                stack.append(x ) # push x to stack
        print(
            x.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', ) # Output in tabular format
    while len(stack ) > 0: # while stack is not empty
        post_fix.append(stack.pop() ) # pop stack & add to Postfix
        print(
            ''' '''.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', ) # Output in tabular format
    return "".join(post_fix ) # return Postfix as str
def infix_2_prefix(infix ):
    infix = list(infix[::-1] ) # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ''')''' # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''(''' # change ")" to "("
    return (infix_2_postfix(''''''.join(infix ) ))[
        ::-1
    ] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ") # Input an Infix equation
    Infix = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299 | 1 |
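# Editor's sketch (assumption, not from the source): expected behavior of the converters
# above ('^' binds tightest, then '*', '/', '%', then '+' and '-').
assert infix_2_postfix("a+b*c") == "abc*+"  # '*' is emitted before '+'
assert infix_2_prefix("a+b*c") == "+a*bc"   # reverse, swap parens, postfix, reverse again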
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer( tf.keras.layers.Layer ):
    def __init__( self , vocab , merges , max_length = None , pad_token_id = None ):
        '''simple docstring'''
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
@classmethod
    def from_tokenizer( cls , tokenizer , *args , **kwargs ):
        '''simple docstring'''
        merges = [' '.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *init_inputs , **kwargs ):
        '''simple docstring'''
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
@classmethod
    def from_config( cls , config ):
        '''simple docstring'''
        return cls(**config )
    def get_config( self ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
    def call( self , x , max_length = None ):
        '''simple docstring'''
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 263 |
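# Editor's sketch (assumption, not from the source): the point of a Keras-layer tokenizer
# like the one above is that tokenization runs inside the TF graph, so it can be exported
# together with the model.
import tensorflow as tf
from transformers import TFGPT2Tokenizer

tokenizer_layer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tokenizer_layer(tf.constant(["hello world"]))
print(outputs["input_ids"], outputs["attention_mask"])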
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Any:
    data = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 2_0, 'a ' * 3_0, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data )
    return dataset
class MakeDuplicateClustersTest( TestCase ):
    def test_make_duplicate_clusters( self ):
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.8_5 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset( self ):
        '''simple docstring'''
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , True )
| 263 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def _get_tensors( self , length ):
        """simple docstring"""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self._get_tensors(5 )
__SCREAMING_SNAKE_CASE : Optional[Any] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_A , _A ) )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = MaxLengthCriteria(max_length=10 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self._get_tensors(5 )
self.assertFalse(criteria(_A , _A ) )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = self._get_tensors(5 )
self.assertFalse(criteria(_A , _A ) )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = self._get_tensors(5 )
__SCREAMING_SNAKE_CASE : int = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_A , _A ) )
__SCREAMING_SNAKE_CASE : Any = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_A , _A ) )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(_A ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__SCREAMING_SNAKE_CASE : Any = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(_A ) , 1 )
| 303 |
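# Editor's sketch (assumption, not from the source): a custom criterion in the same spirit
# as the built-ins exercised above -- stop once every sequence ends with a chosen token.
import torch
from transformers import StoppingCriteria

class StopOnToken(StoppingCriteria):
    def __init__(self, token_id: int):
        self.token_id = token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # True stops generation; check the last generated token of each sequence
        return bool((input_ids[:, -1] == self.token_id).all())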
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args ):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.''' )
    args = parser.parse_args()
    sanity_checks(args )
# ARGS #
init_gpu_params(snake_case )
set_seed(snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
            json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''' )
    args.special_tok_ids = special_tok_ids
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : List[str] = pickle.load(snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = np.maximum(snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__SCREAMING_SNAKE_CASE : Any = 0.0 # do not predict special tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(snake_case )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = LmSeqsDataset(params=snake_case , data=snake_case )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config )
__SCREAMING_SNAKE_CASE : Dict = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case )
else:
__SCREAMING_SNAKE_CASE : str = student_model_class(snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__SCREAMING_SNAKE_CASE : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case , snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case , snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__SCREAMING_SNAKE_CASE : int = Distiller(
params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 303 | 1 |
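# Editor's sketch (assumption, not from the source): the temperature-softened distillation
# objective that the script above weights with --alpha_ce and --temperature.
import torch
import torch.nn.functional as F

def distillation_loss(student_logits: torch.Tensor, teacher_logits: torch.Tensor, temperature: float = 2.0) -> torch.Tensor:
    # KL divergence between softened distributions; the T**2 factor keeps gradient
    # magnitudes comparable across temperatures (Hinton et al., 2015)
    log_p = F.log_softmax(student_logits / temperature, dim=-1)
    q = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(log_p, q, reduction="batchmean") * (temperature**2)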
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : List[str] = logging.get_logger(__name__)
def get_config(model_name ):
    '''simple docstring'''
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = """std_conv""" if """bit""" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key(name ):
    '''simple docstring'''
    if "stem.conv" in name:
        name = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layers""" )
    if "head.fc" in name:
        name = name.replace("""head.fc""" , """classifier.1""" )
    if name.startswith("""norm""" ):
        name = """bit.""" + name
    if "bit" not in name and "classifier" not in name:
        name = """bit.encoder.""" + name
    return name
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
_lowerCAmelCase : Tuple = get_config(UpperCamelCase_ )
# load original model from timm
_lowerCAmelCase : Any = create_model(UpperCamelCase_ , pretrained=UpperCamelCase_ )
timm_model.eval()
# load state_dict of original model
_lowerCAmelCase : List[Any] = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if """head""" in key else val
# load HuggingFace model
_lowerCAmelCase : List[str] = BitForImageClassification(UpperCamelCase_ )
model.eval()
model.load_state_dict(UpperCamelCase_ )
# create image processor
_lowerCAmelCase : Union[str, Any] = create_transform(**resolve_data_config({} , model=UpperCamelCase_ ) )
_lowerCAmelCase : List[Any] = transform.transforms
_lowerCAmelCase : Dict = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_lowerCAmelCase : Union[str, Any] = BitImageProcessor(
do_resize=UpperCamelCase_ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=UpperCamelCase_ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=UpperCamelCase_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_lowerCAmelCase : Union[str, Any] = prepare_img()
_lowerCAmelCase : Optional[Any] = transform(UpperCamelCase_ ).unsqueeze(0 )
_lowerCAmelCase : str = processor(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase_ , UpperCamelCase_ )
# verify logits
with torch.no_grad():
_lowerCAmelCase : List[str] = model(UpperCamelCase_ )
_lowerCAmelCase : Tuple = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
_lowerCAmelCase : Union[str, Any] = timm_model(UpperCamelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase_ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
print(F"Pushing model {model_name} and processor to the hub" )
model.push_to_hub(F"ybelkada/{model_name}" )
processor.push_to_hub(F"ybelkada/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 159 |
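The conversion script above follows a reusable pattern: rename every checkpoint key, load the renamed weights into the target architecture, and accept the conversion only if both models agree on a fixed input. A compact sketch of that pattern; the helper names `convert_state_dict` and `outputs_match` are ours, while `rename_key` is the function from the script:

```python
import torch

def convert_state_dict(state_dict, rename_fn):
    # Apply a key-renaming function to every entry of a checkpoint.
    return {rename_fn(key): value for key, value in state_dict.items()}

def outputs_match(logits_a, logits_b, atol=1e-3):
    # Accept a conversion only if both models agree on the same input.
    return logits_a.shape == logits_b.shape and torch.allclose(logits_a, logits_b, atol=atol)

# Illustrative run with a toy state dict:
sd = {"stem.conv.weight": torch.zeros(1), "head.fc.bias": torch.zeros(1)}
print(convert_state_dict(sd, rename_key))
# {'bit.embedder.convolution.weight': ..., 'classifier.1.bias': ...}
```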
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
| 159 | 1 |
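A quick illustrative run of `are_collinear`: three points on the line x = y = z are collinear because the cross product of AB and AC vanishes, while a fourth point off that line is not.

```python
a, b, c = (0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0)
d = (1.0, 2.0, 3.0)
print(are_collinear(a, b, c))  # True: AB x AC == (0, 0, 0)
print(are_collinear(a, b, d))  # False: AB x AD == (1, -2, 1)
```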
from math import factorial
def combinations(n: int, k: int) -> int:
    """Returns n! // (k! * (n - k)!), the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 15 |
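Because `factorial(n)` grows very quickly, a common alternative computes the binomial coefficient multiplicatively, never forming the full factorials; every intermediate division below is exact because each partial product is itself a binomial coefficient. The function name is ours, not part of the module above.

```python
def combinations_multiplicative(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    k = min(k, n - k)  # exploit the symmetry C(n, k) == C(n, n - k)
    result = 1
    for i in range(1, k + 1):
        # After this step, result == C(n - k + i, i), always an integer.
        result = result * (n - k + i) // i
    return result

assert combinations_multiplicative(52, 5) == 2598960
```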
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 225 | 0 |
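A couple of spot checks, runnable against the functions above (the default of 10001 corresponds to Project Euler problem 7, which asks for the 10001st prime):

```python
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert solution(6) == 13  # the 6th prime
```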
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name(self):
        return f"{self.framework}-transfromers-test"
    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"
    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 245 |
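To see how the `metric_definitions` regexes are meant to work, here is an illustrative extraction from a sample log line of the kind the Trainer prints; SageMaker applies the same idea to a training job's CloudWatch logs, and the capture group becomes the recorded metric value:

```python
import re

metric = {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}
log_line = "eval_accuracy = 0.8421"
match = re.search(metric["Regex"], log_line)
if match:
    print(metric["Name"], "->", float(match.group(1)))  # eval_accuracy -> 0.8421
```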
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A vector with basic linear-algebra operations, backed by a Python list."""
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self ):
"""simple docstring"""
return len(self.__components )
def __str__( self ):
"""simple docstring"""
return "(" + ",".join(map(lowerCamelCase__ , self.__components ) ) + ")"
def __add__( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =len(self )
if size == len(lowerCamelCase__ ):
__UpperCamelCase : Any =[self.__components[i] + other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )]
return Vector(lowerCamelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[str] =len(self )
if size == len(lowerCamelCase__ ):
__UpperCamelCase : Union[str, Any] =[self.__components[i] - other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )]
return Vector(lowerCamelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , lowerCamelCase__ ):
"""simple docstring"""
...
@overload
def __mul__( self , lowerCamelCase__ ):
"""simple docstring"""
...
def __mul__( self , lowerCamelCase__ ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , (float, int) ):
__UpperCamelCase : str =[c * other for c in self.__components]
return Vector(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(self ) == len(lowerCamelCase__ ):
__UpperCamelCase : Tuple =len(self )
__UpperCamelCase : Union[str, Any] =[self.__components[i] * other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )]
return sum(lowerCamelCase__ )
else: # error case
raise Exception('invalid operand!' )
def __lowercase ( self ):
"""simple docstring"""
return Vector(self.__components )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase : List[Any] =value
def __lowercase ( self ):
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase : Tuple =[c**2 for c in self.__components]
return math.sqrt(sum(lowerCamelCase__ ) )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self * other
__UpperCamelCase : str =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension: int) -> Vector:
    """Returns a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Returns a unit basis vector: zeros everywhere except a one at index pos."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Computes scalar * x + y (the classic axpy operation)."""
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    """Returns a vector of size n with random integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A matrix with elementary operations, stored as a list of rows."""
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , lowerCamelCase__ ):
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase : int =[]
for i in range(self.__height ):
__UpperCamelCase : str =[
self.__matrix[i][j] + other.component(lowerCamelCase__ , lowerCamelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCamelCase__ )
return Matrix(lowerCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , lowerCamelCase__ ):
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase : str =[]
for i in range(self.__height ):
__UpperCamelCase : Optional[int] =[
self.__matrix[i][j] - other.component(lowerCamelCase__ , lowerCamelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCamelCase__ )
return Matrix(lowerCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , lowerCamelCase__ ):
"""simple docstring"""
...
@overload
def __mul__( self , lowerCamelCase__ ):
"""simple docstring"""
...
def __mul__( self , lowerCamelCase__ ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , lowerCamelCase__ ): # matrix-vector
if len(lowerCamelCase__ ) == self.__width:
__UpperCamelCase : Dict =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase : Optional[Any] =[
self.__matrix[i][j] * other.component(lowerCamelCase__ )
for j in range(self.__width )
]
ans.change_component(lowerCamelCase__ , sum(lowerCamelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(lowerCamelCase__ , (int, float) ): # matrix-scalar
__UpperCamelCase : Any =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCamelCase__ , self.__width , self.__height )
return None
def __lowercase ( self ):
"""simple docstring"""
return self.__height
def __lowercase ( self ):
"""simple docstring"""
return self.__width
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase : Tuple =value
else:
raise Exception('change_component: indices out of bounds' )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase : Any =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCamelCase__ ) ):
__UpperCamelCase : Optional[Any] =minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCamelCase__ , lowerCamelCase__ )
else:
raise Exception('Indices out of bounds' )
def __lowercase ( self ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase : Tuple =[
self.__matrix[0][y] * self.cofactor(0 , lowerCamelCase__ ) for y in range(self.__width )
]
return sum(lowerCamelCase__ )
def square_zero_matrix(n: int) -> Matrix:
    """Returns an n x n matrix of zeros."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Returns a width x height matrix with random integer entries in [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 245 | 1 |
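The determinant method above uses Laplace (cofactor) expansion along the first row via its minor and cofactor helpers. Stripped of the class plumbing, the same recursion looks like this; it is a standalone sketch, not part of the library:

```python
def determinant(matrix: list[list[float]]) -> float:
    # Laplace expansion along row 0: det(A) = sum_j (-1)**j * A[0][j] * det(minor).
    n = len(matrix)
    if n == 1:
        return matrix[0][0]
    total = 0.0
    for col in range(n):
        # Minor: delete row 0 and the current column.
        minor = [row[:col] + row[col + 1:] for row in matrix[1:]]
        total += (-1) ** col * matrix[0][col] * determinant(minor)
    return total

assert determinant([[1, 2], [3, 4]]) == -2
assert determinant([[2, 0, 0], [0, 3, 0], [0, 0, 4]]) == 24
```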
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""Image processor with the standard CLIP pipeline: resize, center crop, rescale, normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 271 |
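For a single image, the preprocess pipeline above boils down to a few NumPy operations. A minimal sketch with the OpenAI CLIP mean/std constants written out, using a random array in place of a real image:

```python
import numpy as np

# OpenAI CLIP normalization constants, the OPENAI_CLIP_MEAN/STD defaults above.
mean = np.array([0.48145466, 0.4578275, 0.40821073])
std = np.array([0.26862954, 0.26130258, 0.27577711])

rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(224, 224, 3)).astype(np.float32)

pixels = image * (1 / 255)                # do_rescale with rescale_factor=1/255
pixels = (pixels - mean) / std            # do_normalize
pixels = np.transpose(pixels, (2, 0, 1))  # ChannelDimension.FIRST
print(pixels.shape)                       # (3, 224, 224)
```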
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 116 | 0 |
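The `_LazyModule` pattern used by inits like the one above defers every submodule import until an exported name is first accessed, which keeps the top-level package import cheap. A stripped-down sketch of the idea follows; the real `_LazyModule` also handles `__dir__`, pickling and nicer failure messages, and instantiating this class is only meaningful inside an actual package:

```python
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported object back to the submodule that defines it.
        self._object_to_module = {
            obj: module for module, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        if attr not in self._object_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # The submodule is imported only now, on first attribute access.
        submodule = importlib.import_module("." + self._object_to_module[attr], self.__name__)
        return getattr(submodule, attr)
```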
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCAmelCase , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=_lowerCAmelCase , default="" )
parser.add_argument("--eval_dataset" , type=_lowerCAmelCase , default="" )
parser.add_argument("--seed" , type=_lowerCAmelCase , default=42 )
parser.add_argument("--num_train_epochs" , type=_lowerCAmelCase , default=3 )
parser.add_argument("--train_batch_size" , type=_lowerCAmelCase , default=8 )
parser.add_argument("--eval_batch_size" , type=_lowerCAmelCase , default=16 )
parser.add_argument("--adam_epsilon" , default=1e-8 , type=_lowerCAmelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=_lowerCAmelCase , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=_lowerCAmelCase , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=_lowerCAmelCase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=_lowerCAmelCase , default=6.25e-5 )
parser.add_argument("--warmup_steps" , default=0 , type=_lowerCAmelCase , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=_lowerCAmelCase , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=_lowerCAmelCase , default=0.01 )
parser.add_argument("--lm_coef" , type=_lowerCAmelCase , default=0.9 )
parser.add_argument("--n_valid" , type=_lowerCAmelCase , default=374 )
parser.add_argument("--server_ip" , type=_lowerCAmelCase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_lowerCAmelCase , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCAmelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
def tokenize_and_encode(_lowerCAmelCase ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowerCAmelCase ) )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return obj
return [tokenize_and_encode(_lowerCAmelCase ) for o in obj]
logger.info("Encoding dataset..." )
UpperCamelCase : List[str] = load_rocstories_dataset(args.train_dataset )
UpperCamelCase : str = load_rocstories_dataset(args.eval_dataset )
UpperCamelCase : Dict = (train_dataset, eval_dataset)
UpperCamelCase : List[Any] = tokenize_and_encode(_lowerCAmelCase )
# Compute the max input length for the Transformer
UpperCamelCase : Dict = model.config.n_positions // 2 - 2
UpperCamelCase : Optional[int] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
UpperCamelCase : Any = min(_lowerCAmelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
UpperCamelCase : str = pre_process_datasets(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
UpperCamelCase , UpperCamelCase : Dict = tensor_datasets[0], tensor_datasets[1]
UpperCamelCase : Optional[int] = TensorDataset(*_lowerCAmelCase )
UpperCamelCase : Dict = RandomSampler(_lowerCAmelCase )
UpperCamelCase : Any = DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase , batch_size=args.train_batch_size )
UpperCamelCase : List[Any] = TensorDataset(*_lowerCAmelCase )
UpperCamelCase : Optional[Any] = SequentialSampler(_lowerCAmelCase )
UpperCamelCase : List[str] = DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
UpperCamelCase : Union[str, Any] = args.max_steps
UpperCamelCase : Optional[Any] = args.max_steps // (len(_lowerCAmelCase ) // args.gradient_accumulation_steps) + 1
else:
UpperCamelCase : Optional[int] = len(_lowerCAmelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
UpperCamelCase : List[str] = list(model.named_parameters() )
UpperCamelCase : Optional[Any] = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
UpperCamelCase : Optional[int] = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
UpperCamelCase : Optional[Any] = AdamW(_lowerCAmelCase , lr=args.learning_rate , eps=args.adam_epsilon )
UpperCamelCase : Tuple = get_linear_schedule_with_warmup(
_lowerCAmelCase , num_warmup_steps=args.warmup_steps , num_training_steps=_lowerCAmelCase )
if args.do_train:
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
UpperCamelCase : Dict = 0
UpperCamelCase : Optional[int] = 0
UpperCamelCase : str = tqdm(_lowerCAmelCase , desc="Training" )
for step, batch in enumerate(_lowerCAmelCase ):
UpperCamelCase : Optional[int] = tuple(t.to(_lowerCAmelCase ) for t in batch )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = batch
UpperCamelCase : List[Any] = model(_lowerCAmelCase , mc_token_ids=_lowerCAmelCase , lm_labels=_lowerCAmelCase , mc_labels=_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
UpperCamelCase : Dict = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
UpperCamelCase : Union[str, Any] = "Training loss: {:.2e} lr: {:.2e}".format(_lowerCAmelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
UpperCamelCase : Optional[int] = model.module if hasattr(_lowerCAmelCase , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
UpperCamelCase : int = os.path.join(args.output_dir , _lowerCAmelCase )
UpperCamelCase : List[Any] = os.path.join(args.output_dir , _lowerCAmelCase )
torch.save(model_to_save.state_dict() , _lowerCAmelCase )
model_to_save.config.to_json_file(_lowerCAmelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
UpperCamelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
UpperCamelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_lowerCAmelCase )
if args.do_eval:
model.eval()
UpperCamelCase , UpperCamelCase : Optional[int] = 0, 0
UpperCamelCase , UpperCamelCase : str = 0, 0
for batch in tqdm(_lowerCAmelCase , desc="Evaluating" ):
UpperCamelCase : List[Any] = tuple(t.to(_lowerCAmelCase ) for t in batch )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = batch
with torch.no_grad():
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = model(
_lowerCAmelCase , mc_token_ids=_lowerCAmelCase , lm_labels=_lowerCAmelCase , mc_labels=_lowerCAmelCase )
UpperCamelCase : List[str] = mc_logits.detach().cpu().numpy()
UpperCamelCase : Union[str, Any] = mc_labels.to("cpu" ).numpy()
UpperCamelCase : List[str] = accuracy(_lowerCAmelCase , _lowerCAmelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
UpperCamelCase : Optional[int] = eval_loss / nb_eval_steps
UpperCamelCase : str = eval_accuracy / nb_eval_examples
UpperCamelCase : Optional[Any] = tr_loss / nb_tr_steps if args.do_train else None
UpperCamelCase : List[Any] = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
UpperCamelCase : Union[str, Any] = os.path.join(args.output_dir , "eval_results.txt" )
with open(_lowerCAmelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _lowerCAmelCase , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 140 |
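The `(batch, 2, seq)` tensors built by `pre_process_datasets` are easiest to see on a toy example: each story is laid out twice, once per candidate ending, and `mc_token_ids` records where the classification token sits in each copy. All token ids below are made up for illustration:

```python
import numpy as np

start, delim, clf = 900, 901, 902          # illustrative special-token ids
story, cont1, cont2 = [1, 2, 3], [4, 5], [6]

with_cont1 = [start] + story + [delim] + cont1 + [clf]
with_cont2 = [start] + story + [delim] + cont2 + [clf]
input_len = max(len(with_cont1), len(with_cont2))

input_ids = np.zeros((1, 2, input_len), dtype=np.int64)
input_ids[0, 0, : len(with_cont1)] = with_cont1
input_ids[0, 1, : len(with_cont2)] = with_cont2
mc_token_ids = np.array([[len(with_cont1) - 1, len(with_cont2) - 1]])
print(input_ids)      # the story paired with each candidate ending
print(mc_token_ids)   # positions of the clf token in each choice
```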
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively; A must be strictly diagonally dominant."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if the coefficient part of the augmented table is not strictly diagonally dominant."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 140 | 1 |
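An illustrative run on a small strictly diagonally dominant system, compared against NumPy's direct solver; after a couple of dozen sweeps the iterate is close to the exact solution:

```python
import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [0.0, 0.0, 3.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
init_val = [0.5, -0.5, -0.5]

approx = jacobi_iteration_method(coefficient, constant, init_val, iterations=25)
print(approx)                                            # iterative estimate
print(np.linalg.solve(coefficient, constant).flatten())  # direct solution
```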
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 196 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 196 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(candidate: int) -> bool:
    """True if candidate uses each of the digits 1-9 exactly once."""
    digits = str(candidate)
    return len(digits) == 9 and set(digits) == set("123456789")
def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 340 | 1 |
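This appears to be Project Euler problem 38, and the two magic multipliers encode concatenated products: for a 4-digit n with n >= 5000, 2n has five digits, so the concatenation n‖2n equals n·10^5 + 2n = 100002·n; for a 3-digit n in the searched range, 2n and 3n stay three digits, so n‖2n‖3n = n·10^6 + 2n·10^3 + 3n = 1002003·n. A quick cross-check of both identities:

```python
n = 9327
assert int(str(n) + str(2 * n)) == 100002 * n                 # n . 2n, 4-digit base
m = 192
assert int(str(m) + str(2 * m) + str(3 * m)) == 1002003 * m   # n . 2n . 3n, 3-digit base
print(solution())  # 932718654 == 100002 * 9327
```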
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
UpperCamelCase : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCAmelCase__ ):
UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(lowerCAmelCase__ ).groups()[0]
UpperCamelCase : Union[str, Any] = re.findall(R'\[([^\]]+)\]' , lowerCAmelCase__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
UpperCamelCase : Tuple = _re_import_struct_key_value.search(lowerCAmelCase__ )
if single_line_import_search is not None:
UpperCamelCase : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
UpperCamelCase : List[str] = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCamelCase : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
UpperCamelCase : Union[str, Any] = lines[line_index]
if _re_import_struct_add_one.search(lowerCAmelCase__ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCAmelCase__ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCAmelCase__ ) is not None:
UpperCamelCase : str = _re_import_struct_add_many.search(lowerCAmelCase__ ).groups()[0].split(', ' )
UpperCamelCase : Tuple = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_between_brackets.search(lowerCAmelCase__ ) is not None:
UpperCamelCase : List[str] = _re_between_brackets.search(lowerCAmelCase__ ).groups()[0].split(', ' )
UpperCamelCase : Optional[Any] = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_quote_object.search(lowerCAmelCase__ ) is not None:
objects.append(_re_quote_object.search(lowerCAmelCase__ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
UpperCamelCase : str = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCamelCase : str = []
while (
line_index < len(lowerCAmelCase__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
UpperCamelCase : Tuple = lines[line_index]
UpperCamelCase : Dict = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCamelCase : str = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCAmelCase__ ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCamelCase : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
UpperCamelCase : str = lines[line_index]
UpperCamelCase : Union[str, Any] = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCamelCase : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 119 |
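A quick illustrative run of `find_backend` on the kind of lines these inits contain; only `if not is_xxx_available()` guards yield a backend name:

```python
print(find_backend("    if not is_torch_available():"))          # 'torch'
print(find_backend("    if not is_sentencepiece_available():"))  # 'sentencepiece'
print(find_backend("from typing import TYPE_CHECKING"))          # None
```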
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
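# A minimal, self-contained sketch (not part of the test suite; the zero "model output"
# below is only a stand-in for a real denoising network) of the
# scale_model_input -> step loop the tests above exercise:
def _demo_kdpm2_loop():
    scheduler = KDPM2DiscreteScheduler(
        num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    scheduler.set_timesteps(10)
    sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sample = scheduler.scale_model_input(sample, t)
        model_output = torch.zeros_like(sample)  # stand-in prediction
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample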
| 168 | 0 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the K, O, Q, V parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm parameters of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old, i, "decoder").T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
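# Example invocation (the paths and the script name are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output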
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 357 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
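# Quick sanity check (illustrative values, not from the original file): with
# embed_dim=96 and four stages, the derived hidden_size is 96 * 2**3 = 768.
_cfg = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2])
assert _cfg.hidden_size == 768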
| 149 | 0 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
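# A small illustration (not in the original module) of composing further gates from
# or_gate: NOR is simply the negated OR.
def nor_gate(input_1: int, input_2: int) -> int:
    return int(not or_gate(input_1, input_2))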
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 45 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
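# A minimal usage sketch (illustrative values, not from the original file):
_seg_cfg = SegformerConfig(num_encoder_blocks=4, hidden_sizes=[32, 64, 160, 256])
_seg_onnx = SegformerOnnxConfig(_seg_cfg)
assert _seg_onnx.default_onnx_opset == 12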
| 124 | 0 |
"""simple docstring"""
import functools
def mincost_tickets(days, costs):
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
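# Worked example (a standard instance of this problem): for travel days
# [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15], the optimum is a 1-day pass on day 1,
# a 7-day pass covering days 4-8, and a 1-day pass on day 20:
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 2 + 7 + 2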
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 |
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant, usually 0.04 or 0.06;
        # window_size is the side length of the neighbourhood considered.
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        # Returns the image with detected corners highlighted, plus the corner list.
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
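# The response computed above is the standard Harris measure
#   R = det(M) - k * trace(M)**2
# where M is the 2x2 structure tensor accumulated over the window; windows whose
# R exceeds the threshold are marked as corners.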
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 316 | 0 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo, pytorch_dump_folder_path):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 98 | """simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
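# For example (derived from the tables above), the HF key
# "down_blocks.0.resnets.0.conv1.weight" ends up as the SD key
# "input_blocks.1.0.in_layers.2.weight".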
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
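# e.g. a [512, 512] attention projection becomes a [512, 512, 1, 1] conv kernel:
# reshape_weight_for_sd(torch.ones(512, 512)).shape == torch.Size([512, 512, 1, 1])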
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
lowerCAmelCase__ : Optional[int] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
else:
lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
else:
lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
else:
lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 98 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    image2 = Image.open(ds[1]["file"])
    map1 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
def __lowerCAmelCase ( self ) -> List[str]:
# Initialize image_processing
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ) -> Dict:
# Initialize image_processing
_UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processing
_UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase : Optional[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
_UpperCAmelCase : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
_UpperCAmelCase : Any = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched
_UpperCAmelCase : List[str] = image_processing(A , A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
_UpperCAmelCase : Any = prepare_semantic_single_inputs()
_UpperCAmelCase : int = image_processing(A , A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched input (PIL images)
_UpperCAmelCase : int = prepare_semantic_batch_inputs()
_UpperCAmelCase : str = image_processing(A , A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
_UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
_UpperCAmelCase : str = prepare_semantic_single_inputs()
_UpperCAmelCase : List[str] = image_processing(A , A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_5_0 )
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[str] = image_processing(A , A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
| 357 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peaking EQ biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
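# A minimal sketch (48 kHz sample rate, arbitrary settings) of designing a peaking EQ
# and pushing one sample through it; IIRFilter.process() is assumed to be the
# per-sample entry point of audio_filters.iir_filter.IIRFilter.
_peak_filter = make_peak(frequency=1_000, samplerate=48_000, gain_db=6.0)
print(_peak_filter.process(0.5))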
| 68 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
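# A minimal usage sketch (requires torch and bitsandbytes >= 0.39.0 at runtime):
# config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16")
# assert config.quantization_method() == "nf4"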
| 70 |
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers, then Euler's product formula
    # phi(n) = n * prod(1 - 1/p) over the prime divisors p of n.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 | 0 |
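A quick sanity check of `solution` on a small bound: the totients phi(2)..phi(8) are 1, 2, 2, 4, 2, 6, 4, so the expected count for limit 8 is 21.

assert solution(8) == 21  # 1 + 2 + 2 + 4 + 2 + 6 + 4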
'''simple docstring'''
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main() | 357 |
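Each `*_command_parser` call above registers a subcommand and binds its handler through `set_defaults(func=...)`, which is what makes the final `args.func(args)` dispatch work. A minimal, self-contained sketch of the same pattern (hypothetical `greet` command, unrelated to accelerate):

from argparse import ArgumentParser

def greet(args):
    print(f"hello, {args.name}")

parser = ArgumentParser("demo")
subparsers = parser.add_subparsers()
greet_parser = subparsers.add_parser("greet")
greet_parser.add_argument("--name", default="world")
greet_parser.set_defaults(func=greet)  # the same hook that `args.func(args)` relies on

args = parser.parse_args(["greet", "--name", "accelerate"])
args.func(args)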
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    # Class attribute names follow the ProcessorMixin API.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BLIP's BERT tokenizer should not emit token_type_ids by default.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) | 187 | 0 |
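A sketch of calling such a processor, assuming the published `Salesforce/blip-image-captioning-base` checkpoint and the standard `from_pretrained` API; the image URL is a placeholder:

import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.open(requests.get("https://example.com/cat.png", stream=True).raw)
# images and text are fused into one BatchEncoding: pixel_values plus input_ids/attention_mask.
inputs = processor(images=image, text="a photo of", return_tensors="pt")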
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 70 |
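A sketch of driving the ONNX config above, assuming the classes as reconstructed here and a published BART tokenizer; `generate_dummy_inputs` is the entry point an exporter would call:

from transformers import AutoTokenizer

config = BartConfig()
onnx_config = BartOnnxConfig(config, task="default")
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
# Dynamic axes (-1) fall back to a fixed batch of 2 and sequence of 8 via compute_effective_axis_dimension.
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, batch_size=-1, seq_length=-1, framework=TensorType.PYTORCH)
print(sorted(dummy_inputs))  # attention_mask, decoder_attention_mask, decoder_input_ids, input_ids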
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
UpperCAmelCase : Union[str, Any] = "__DUMMY_TRANSFORMERS_USER__"
UpperCAmelCase : Dict = "Dummy User"
UpperCAmelCase : Optional[int] = "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"
UpperCAmelCase : Tuple = "https://hub-ci.huggingface.co"
UpperCAmelCase : Optional[Any] = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
UpperCAmelCase : Tuple = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
UpperCAmelCase : int = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Dict:
'''simple docstring'''
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __lowerCAmelCase )
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __lowerCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __lowerCAmelCase )
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __lowerCAmelCase )
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
HfFolder.save_token(__lowerCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE () -> Dict:
'''simple docstring'''
return HfApi(endpoint=__lowerCAmelCase )
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = HfFolder.get_token()
HfFolder.save_token(__lowerCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__lowerCAmelCase )
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
def _cleanup_repo(__lowerCAmelCase ):
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
@contextmanager
def _temporary_repo(__lowerCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(__lowerCAmelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = F'''repo_txt_data-{int(time.time() * 10E3 )}'''
lowercase_ = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" , private=__lowerCAmelCase )
hf_api.upload_file(
token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__lowerCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = F'''repo_zipped_txt_data-{int(time.time() * 10E3 )}'''
lowercase_ = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" , private=__lowerCAmelCase )
hf_api.upload_file(
token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__lowerCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = F'''repo_zipped_img_data-{int(time.time() * 10E3 )}'''
lowercase_ = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" , private=__lowerCAmelCase )
hf_api.upload_file(
token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__lowerCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 136 | 0 |
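The repo fixtures above rely on pytest's yield-fixture teardown idiom: everything before `yield` is setup, everything after runs as cleanup even when the test fails. A minimal self-contained sketch of the same pattern (hypothetical resource, unrelated to the Hub):

import pytest


@pytest.fixture
def scratch_resource():
    resource = {"ready": True}  # setup
    yield resource              # the test body runs here
    resource.clear()            # teardown, runs even if the test raised


def test_uses_resource(scratch_resource):
    assert scratch_resource["ready"]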
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def __lowerCAmelCase (_UpperCamelCase = "AAPL" ):
__lowerCAmelCase : int = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
__lowerCAmelCase : Tuple = BeautifulSoup(requests.get(_UpperCamelCase ).text , 'html.parser' )
__lowerCAmelCase : Any = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}') | 182 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    # Days of the week keyed by Zeller's f value
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    # Maps datetime.weekday() (Monday=0) onto Zeller's numbering (Sunday=0)
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
lowerCamelCase__ = parser.parse_args()
zeller(args.date_input) | 182 | 1 |
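A worked check of the congruence for 01-01-2000, a Saturday: January folds into month 13 of 1999, so c = 19, k = 99, t = int(2.6 * 13 - 5.39) = 28, u = 4, v = 24, x = 1 + 99 = 100, z = 156, w = 156 - 38 = 118, and f = 118 mod 7 = 6, which indexes "Saturday".

assert zeller("01-01-2000") == "Your date 01-01-2000, is a Saturday!"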
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 62 |
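`from_config` builds a freshly initialized model, while `from_pretrained` loads trained weights; a quick way to see the difference with the standard transformers API:

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("gpt2")
fresh = AutoModelForCausalLM.from_config(config)        # random initialization, architecture only
trained = AutoModelForCausalLM.from_pretrained("gpt2")  # downloads and loads trained weights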
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
def _lowercase ( self : Dict ) -> str:
_a : List[Any] = ModelForTest()
_a : str = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook , UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Dict = ModelForTest()
_a : Dict = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Dict ) -> int:
_a : str = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Optional[Any] = test_model(x + 1 )
_a : str = test_model(x + 2 )
_a : Union[str, Any] = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : int = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : str = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : int = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 )
def _lowercase ( self : Tuple ) -> int:
_a : Tuple = ModelForTest()
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : Optional[int] = test_model(UpperCAmelCase__ )
_a : int = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : List[Any] = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 )
def _lowercase ( self : Dict ) -> Optional[Any]:
_a : Any = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Dict = test_model(UpperCAmelCase__ )
_a : Any = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a : Any = True
_a : Union[str, Any] = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowercase ( self : Optional[Any] ) -> str:
_a : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a : Optional[int] = torch.randn(2 , 3 )
_a : Any = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
_a : str = torch.randn(2 , 3 ).to(0 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : int = torch.randn(2 , 3 )
_a : str = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_a : List[str] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Tuple = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Tuple ) -> List[str]:
_a : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : List[Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : List[str] = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Dict ) -> str:
_a : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Union[str, Any] = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Any = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 294 | 0 |
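A compact sketch of the offload behavior these tests exercise, mirroring the per-module hook usage above (accelerate's public `AlignDevicesHook` API):

import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module

layer = nn.Linear(3, 4)
add_hook_to_module(layer, AlignDevicesHook(execution_device="cpu", offload=True))
print(layer.weight.device)              # meta: parameters are offloaded until forward
print(layer(torch.randn(2, 3)).shape)   # the hook streams weights in for the call
remove_hook_from_module(layer)          # weights are loaded back onto the module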
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 366 |
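For table question answering with aggregation (the WTQ setup), several of these flags change together; a hedged sketch using the class above (values beyond these two are left at their defaults):

config = TapasConfig(
    num_aggregation_labels=4,        # NONE, SUM, AVERAGE, COUNT
    use_answer_as_supervision=True,  # weakly supervise cell selection from the answer value
)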
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 295 | 0 |
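LED adds a `global_attention_mask` on top of the usual encoder inputs; by convention the first token gets global attention for summarization. A short sketch with the real tokenizer:

import torch
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
inputs = tokenizer(["A long paragraph for summarization."], return_tensors="pt")
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1  # give the <s> token global attention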
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs) -> None:
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 151 |
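A hedged sketch of constructing the config above with an explicit backbone, mirroring the default branch (`ResNet` with stages 2-4 exposed):

from transformers import ResNetConfig

backbone = ResNetConfig(out_features=["stage2", "stage3", "stage4"])
config = DetaConfig(backbone_config=backbone, num_queries=300)
print(config.to_dict()["backbone_config"]["model_type"])  # "resnet"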
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v):
    # argparse-friendly boolean parser
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Target keys follow diffusers' ResnetBlock2D parameter naming.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
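# Illustrative check (added, not part of the original script): the fused qkv projection in
# the source checkpoint is a 1x1 conv with weight shape (3 * channels, channels, 1, 1);
# chunking on dim 0 splits it into q, k, v, and squeezing the trailing singleton dims
# yields ordinary linear-layer weights.
def _demo_qkv_split(channels=8):
    fused_weight = torch.randn(3 * channels, channels, 1, 1)
    weight_q, weight_k, weight_v = fused_weight.chunk(3, dim=0)
    weight_q = weight_q.squeeze(-1).squeeze(-1)
    assert weight_q.shape == (channels, channels)
    return weight_q, weight_k, weight_v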
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint['time_embed.0.weight']
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint['time_embed.0.bias']
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint['time_embed.2.weight']
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint['time_embed.2.bias']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint['label_emb.weight']
    new_checkpoint["conv_in.weight"] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint["conv_in.bias"] = checkpoint['input_blocks.0.0.bias']
    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config['up_block_types']
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint['out.0.weight']
    new_checkpoint["conv_norm_out.bias"] = checkpoint['out.0.bias']
    new_checkpoint["conv_out.weight"] = checkpoint['out.2.weight']
    new_checkpoint["conv_out.bias"] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
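# Hedged usage sketch (added; not part of the conversion script): reload the exported
# pipeline and draw a sample. The two-step sampling call is an assumption based on how
# consistency models are typically run; adjust num_inference_steps as needed.
def _demo_reload_and_sample(dump_path):
    pipe = ConsistencyModelPipeline.from_pretrained(dump_path)
    return pipe(batch_size=1, num_inference_steps=2).images[0]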
| 151 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase ):
    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__snake_case ):
__a =self.dist_env.copy()
__a =f'{i + 1}'
__a =strategy
with mockenv_context(**__snake_case ):
__a =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__snake_case ):
__a =self.dist_env.copy()
__a =prefetch_policy
with mockenv_context(**__snake_case ):
__a =FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__snake_case ):
__a =self.dist_env.copy()
__a =state_dict_type
with mockenv_context(**__snake_case ):
__a =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
        model = AutoModel.from_pretrained(BERT_BASE_CASED )
for policy in FSDP_AUTO_WRAP_POLICY:
__a =self.dist_env.copy()
__a =policy
if policy == "TRANSFORMER_BASED_WRAP":
__a ='BertLayer'
elif policy == "SIZE_BASED_WRAP":
__a ='2000'
with mockenv_context(**__snake_case ):
__a =FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
__a =self.dist_env.copy()
__a ='TRANSFORMER_BASED_WRAP'
__a ='T5Layer'
with mockenv_context(**__snake_case ):
__a =FullyShardedDataParallelPlugin()
with self.assertRaises(__snake_case ) as cm:
                    fsdp_plugin.set_auto_wrap_policy(model )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
__a =self.dist_env.copy()
__a ='SIZE_BASED_WRAP'
__a ='0'
with mockenv_context(**__snake_case ):
__a =FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
__a =self.dist_env.copy()
__a =mp_dtype
with mockenv_context(**__snake_case ):
__a =Accelerator()
if mp_dtype == "fp16":
                    dtype = torch.float16
elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
__a =MixedPrecision(param_dtype=__snake_case , reduce_dtype=__snake_case , buffer_dtype=__snake_case )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __snake_case )
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler , ShardedGradScaler ) )
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
__a =self.dist_env.copy()
__a =str(__snake_case ).lower()
with mockenv_context(**__snake_case ):
__a =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__snake_case ) )
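# Standalone sketch of the pattern the tests above exercise (assumed minimal flow, mirroring
# the mockenv usage): FullyShardedDataParallelPlugin reads its configuration from FSDP_*
# environment variables, so patching the environment is enough to pick a sharding strategy.
def _demo_plugin_from_env():
    env = dict(ACCELERATE_USE_FSDP="true", FSDP_SHARDING_STRATEGY="1")  # "1" == FULL_SHARD
    with mockenv_context(**env):
        return FullyShardedDataParallelPlugin()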
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase ):
    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            'fsdp_shard_grad_op_transformer_based_wrap',
            'fsdp_full_shard_transformer_based_wrap',
        ]
        self.peak_memory_usage_upper_bound = {
            'multi_gpu_fp16': 3200,
            'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
            'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =os.path.join(self.test_scripts_folder , 'test_performance.py' )
__a =['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
__a =cmd.copy()
for i, strategy in enumerate(__snake_case ):
if strategy.lower() in config:
cmd_config.append(f'--fsdp_sharding_strategy={i+1}' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'--output_dir={self.tmpdir}',
f'--performance_lower_bound={self.performance_lower_bound}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
__a =[
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
for i, strategy in enumerate(__snake_case ):
__a =cmd.copy()
cmd_config.append(f'--fsdp_sharding_strategy={i+1}' )
if strategy != "FULL_SHARD":
continue
__a =len(__snake_case )
for state_dict_type in FSDP_STATE_DICT_TYPE:
__a =cmd_config[:state_dict_config_index]
cmd_config.append(f'--fsdp_state_dict_type={state_dict_type}' )
cmd_config.extend(
[
self.test_file_path,
f'--output_dir={self.tmpdir}',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
__a =cmd_config[:-1]
__a =os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
f'--resume_from_checkpoint={resume_from_checkpoint}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
__a =[
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
__a =cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
for i, strategy in enumerate(__snake_case ):
if strategy.lower() in spec:
cmd_config.append(f'--fsdp_sharding_strategy={i+1}' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'--output_dir={self.tmpdir}',
f'--peak_memory_upper_bound={peak_mem_upper_bound}',
f'--n_train={self.n_train}',
f'--n_val={self.n_val}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 308 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
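# Illustration (added; a behavioral note, not executable module code): with _LazyModule
# installed in sys.modules, nothing from tokenization_bartpho is imported at load time;
# the first attribute access triggers the real import, e.g.:
#
#   from transformers.models import bartpho   # cheap, no sentencepiece import yet
#   tokenizer_cls = bartpho.BartphoTokenizer  # submodule import happens here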
| 308 | 1 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module ):
    """simple docstring"""
    def __init__(self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward(self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(SCREAMING_SNAKE_CASE_ , model.state_dict() )
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , """index.json""" )
self.assertTrue(os.path.isfile(SCREAMING_SNAKE_CASE_ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , F"{key}.dat" )
self.assertTrue(os.path.isfile(SCREAMING_SNAKE_CASE_ ) )
# TODO: add tests on the fact weights are properly loaded
def UpperCAmelCase_ (self ):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
for dtype in dtypes:
UpperCamelCase__ = torch.randn(2 , 3 , dtype=SCREAMING_SNAKE_CASE_ )
with TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = offload_weight(SCREAMING_SNAKE_CASE_ , """weight""" , SCREAMING_SNAKE_CASE_ , {} )
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , """weight.dat""" )
self.assertTrue(os.path.isfile(SCREAMING_SNAKE_CASE_ ) )
self.assertDictEqual(SCREAMING_SNAKE_CASE_ , {"""weight""": {"""shape""": [2, 3], """dtype""": str(SCREAMING_SNAKE_CASE_ ).split(""".""" )[1]}} )
UpperCamelCase__ = load_offloaded_weight(SCREAMING_SNAKE_CASE_ , index["""weight"""] )
self.assertTrue(torch.equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ModelForTest()
UpperCamelCase__ = model.state_dict()
UpperCamelCase__ = {k: v for k, v in state_dict.items() if """linear2""" not in k}
UpperCamelCase__ = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = OffloadedWeightsLoader(state_dict=SCREAMING_SNAKE_CASE_ , save_folder=SCREAMING_SNAKE_CASE_ )
# Every key is there with the right value
self.assertEqual(sorted(SCREAMING_SNAKE_CASE_ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , weight_map[key] ) )
UpperCamelCase__ = {k: v for k, v in state_dict.items() if """weight""" in k}
UpperCamelCase__ = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = OffloadedWeightsLoader(state_dict=SCREAMING_SNAKE_CASE_ , save_folder=SCREAMING_SNAKE_CASE_ )
# Every key is there with the right value
self.assertEqual(sorted(SCREAMING_SNAKE_CASE_ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Duplicates are removed
UpperCamelCase__ = OffloadedWeightsLoader(state_dict=SCREAMING_SNAKE_CASE_ , save_folder=SCREAMING_SNAKE_CASE_ )
# Every key is there with the right value
self.assertEqual(sorted(SCREAMING_SNAKE_CASE_ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , weight_map[key] ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
UpperCamelCase__ = extract_submodules_state_dict(SCREAMING_SNAKE_CASE_ , ["""a.1""", """a.2"""] )
self.assertDictEqual(SCREAMING_SNAKE_CASE_ , {"""a.1""": 0, """a.2""": 2} )
UpperCamelCase__ = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
UpperCamelCase__ = extract_submodules_state_dict(SCREAMING_SNAKE_CASE_ , ["""a.1""", """a.2"""] )
self.assertDictEqual(SCREAMING_SNAKE_CASE_ , {"""a.1.a""": 0, """a.2.a""": 2} )
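# Compact round-trip sketch of the API under test (added for illustration; the _demo_*
# name is not part of accelerate): offload a state dict to disk, then read it back lazily
# through OffloadedWeightsLoader.
def _demo_offload_roundtrip():
    model = ModelForTest()
    state_dict = model.state_dict()
    with TemporaryDirectory() as tmp_dir:
        offload_state_dict(tmp_dir, state_dict)
        weight_map = OffloadedWeightsLoader(save_folder=tmp_dir)
        # materialize the tensors while the on-disk files still exist
        return {key: weight_map[key] for key in state_dict.keys()}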
| 244 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module ):
"""simple docstring"""
    def __init__(self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , num_vector_embeds = None , activation_fn = "geglu" , num_embeds_ada_norm = None , ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 2_57]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict = True , ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states )
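    # Worked sketch (added for illustration; not part of diffusers): the slicing performed
    # in forward above, shown standalone. With condition_lengths = [77, 257], tokens
    # [:, 0:77] feed one transformer and tokens [:, 77:334] feed the other, according to
    # transformer_index_for_condition.
    def _demo_condition_split(self , encoder_hidden_states ):
        chunks, start = [], 0
        for length in self.condition_lengths:
            chunks.append(encoder_hidden_states[:, start : start + length] )
            start += length
        return chunks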
| 244 | 1 |
def power(base: int , exponent: int ) -> float:
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(F"""{base} to the power of {exponent} is {result}""")
| 171 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        '''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : int = True
__UpperCamelCase : Tuple = LlamaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Optional[int] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
__UpperCamelCase : Union[str, Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
__UpperCamelCase : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Any:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Union[str, Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Any = True
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : List[str] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
__UpperCamelCase : Optional[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
__UpperCamelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCamelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCamelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase : Any = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
__UpperCamelCase : List[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
# select random slice
__UpperCamelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Optional[int] = 3
__UpperCamelCase : int = input_dict["input_ids"]
__UpperCamelCase : Optional[Any] = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : List[str] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : List[str] = 3
__UpperCamelCase : Any = "single_label_classification"
__UpperCamelCase : List[str] = input_dict["input_ids"]
__UpperCamelCase : Tuple = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : Optional[int] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Dict = 3
__UpperCamelCase : Tuple = "multi_label_classification"
__UpperCamelCase : Any = input_dict["input_ids"]
__UpperCamelCase : str = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCamelCase : Optional[Any] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : int = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Any = ids_tensor([1, 10] , config.vocab_size )
__UpperCamelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Union[str, Any] = LlamaModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
__UpperCamelCase : int = original_model(__UpperCamelCase ).last_hidden_state
__UpperCamelCase : List[Any] = original_model(__UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Dict = {"type": scaling_type, "factor": 10.0}
__UpperCamelCase : Optional[Any] = LlamaModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
__UpperCamelCase : Optional[int] = scaled_model(__UpperCamelCase ).last_hidden_state
__UpperCamelCase : Tuple = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
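# Hedged sketch (added; not part of the test suite): the scaling dict built in the rope test
# above is what LlamaConfig expects. "linear" divides positions by the factor, while
# "dynamic" grows the RoPE base once inputs exceed the original maximum length.
def _demo_rope_scaling_config(scaling_type="dynamic" , factor=10.0 ):
    return LlamaConfig(rope_scaling={"type": scaling_type, "factor": factor} )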
@require_torch
class LlamaIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Tuple = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__UpperCamelCase : Tuple = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__UpperCamelCase : List[str] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Tuple = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Dict = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__UpperCamelCase : str = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCamelCase : int = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Any = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : Dict = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__UpperCamelCase : Any = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCamelCase : Any = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Union[str, Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Optional[int] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__UpperCamelCase : Optional[Any] = model(torch.tensor(__UpperCamelCase ) )
__UpperCamelCase : Dict = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
__UpperCamelCase : Tuple = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase : List[str] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
__UpperCamelCase : List[str] = "Simply put, the theory of relativity states that "
__UpperCamelCase : Optional[Any] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__UpperCamelCase : Dict = tokenizer.encode(__UpperCamelCase , return_tensors="pt" )
__UpperCamelCase : Optional[Any] = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__UpperCamelCase )
# greedy generation outputs
__UpperCamelCase : List[Any] = model.generate(__UpperCamelCase , max_new_tokens=64 , top_p=__UpperCamelCase , temperature=1 , do_sample=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 171 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    '''simple docstring'''
    for attribute in key.split("""."""):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = """lm_head"""
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''simple docstring'''
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, """vocab.json""")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, """w""", encoding="""utf-8""") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1]), """w2v_path""": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
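# Illustrative helper (added; not part of the conversion script) showing how the "*" in a
# MAPPING value is resolved: the encoder layer index is recovered from the fairseq name
# and substituted into the transformers-side key, exactly as recursively_load_weights does.
def _demo_expand_mapped_key(name="encoder.layers.3.self_attn.k_proj.weight"):
    mapped_key = "unispeech." + MAPPING["self_attn.k_proj"]
    layer_index = name.split("self_attn.k_proj")[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)  # -> "unispeech.encoder.layers.3.attention.k_proj"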
| 343 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 192 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def get_tokenizer(self , mname ):
        return FSMTTokenizer.from_pretrained(mname )

    def get_model(self , mname ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['''en-ru''', 2_6.0],
['''ru-en''', 2_2.0],
['''en-de''', 2_2.0],
['''de-en''', 2_9.0],
] )
@slow
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->str:
# note: this test does not measure peak performance, since it only evaluates a small batch,
# but that is enough to detect a regression in output quality
SCREAMING_SNAKE_CASE : List[str] = F"""facebook/wmt19-{pair}"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self.get_model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = bleu_data[pair]['''src''']
SCREAMING_SNAKE_CASE : Dict = bleu_data[pair]['''tgt''']
SCREAMING_SNAKE_CASE : Tuple = tokenizer(_lowerCamelCase , return_tensors='''pt''' , truncation=_lowerCamelCase , padding='''longest''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(
_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = calculate_bleu(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
self.assertGreaterEqual(scores['''bleu'''] , _lowerCamelCase )
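# Added note: the (pair, score) entries fed to @parameterized.expand above are
# regression floors, not state-of-the-art numbers -- e.g. ('en-ru', 26.0) means the
# small evaluation batch must reach BLEU >= 26.0 for facebook/wmt19-en-ru, enforced
# by the assertGreaterEqual check.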
| 19 |
from math import pi, sqrt, tan
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
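# Added quick check (illustrative, not part of the original file): for radius 3 and
# height 4 the slant height is (4**2 + 3**2) ** 0.5 = 5, so the total surface area
# is pi * 3 * (3 + 5) = 24 * pi.
assert abs(pi * 3 * (3 + (4**2 + 3**2) ** 0.5) - 24 * pi) < 1e-9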
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
SCREAMING_SNAKE_CASE : Optional[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(a__ , 2 ) * torus_radius * tube_radius
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
SCREAMING_SNAKE_CASE : int = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE : List[str] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
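# Added illustrative check (not part of the original file): the 3-4-5 right triangle
# has semi-perimeter (3 + 4 + 5) / 2 = 6, so Heron's formula gives
# sqrt(6 * 3 * 2 * 1) = 6.0, matching base * height / 2 = 3 * 4 / 2.
_heron_s = (3 + 4 + 5) / 2  # hypothetical helper name, added for this check only
assert abs(sqrt(_heron_s * (_heron_s - 3) * (_heron_s - 4) * (_heron_s - 5)) - 6.0) < 1e-9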
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print('''\nSurface Areas of various geometric shapes: \n''')
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 19 | 1 |
"""simple docstring"""
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
return abs(_UpperCAmelCase ) if a == 0 else greatest_common_divisor(b % a , _UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
while y: # loop terminates when y == 0; x then holds the final GCD
A_ , A_ : Tuple = y, x % y
return abs(_UpperCAmelCase )
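# Added clean sketch of the Euclidean algorithm implemented by the two functions
# above, with readable names (illustrative only, not part of the original file):
def _gcd_demo(x: int, y: int) -> int:
    # Repeatedly replace (x, y) with (y, x % y); when y reaches 0, x holds the GCD.
    while y:
        x, y = y, x % y
    return abs(x)

assert _gcd_demo(48, 18) == 6
assert _gcd_demo(0, 7) == 7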
def UpperCAmelCase__ ( ):
"""simple docstring"""
try:
A_ : List[str] = input('Enter two integers separated by comma (,): ' ).split(',' )
A_ : List[str] = int(nums[0] )
A_ : List[Any] = int(nums[1] )
print(
f"""greatest_common_divisor({num_a}, {num_a}) = """
f"""{greatest_common_divisor(_UpperCAmelCase , _UpperCAmelCase )}""" )
print(f"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(_UpperCAmelCase , _UpperCAmelCase )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('Wrong input' )
if __name__ == "__main__":
main()
| 286 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : Dict = ["""image_processor""", """tokenizer"""]
lowercase_ : Union[str, Any] = """ViltImageProcessor"""
lowercase_ : Any = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case_ , )
A_ : Dict = kwargs.pop('feature_extractor' )
A_ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case_ , snake_case_ )
A_ : List[str] = self.image_processor
def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ):
"""simple docstring"""
A_ : str = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(snake_case_ , return_tensors=snake_case_ )
encoding.update(snake_case_ )
return encoding
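# Added note: __call__ above simply composes the two sub-processors -- the tokenizer
# produces the text BatchEncoding, then the image processor's pixel_values/pixel_mask
# are merged in via encoding.update(...), so a single call returns one dict that can
# be unpacked straight into the model, e.g. ViltModel(**encoding).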
def lowerCamelCase_ ( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCamelCase_ ( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.tokenizer.model_input_names
A_ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case_ , )
return self.image_processor_class
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case_ , )
return self.image_processor
| 286 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = '▁'
a_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
a_ = {
'google/pegasus-xsum': 512,
}
class _lowercase ( snake_case_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PegasusTokenizer
lowercase = ['input_ids', 'attention_mask']
def __init__( self : Optional[int] , snake_case : Tuple=None , snake_case : Optional[int]=None , snake_case : Tuple="<pad>" , snake_case : Dict="</s>" , snake_case : List[Any]="<unk>" , snake_case : str="<mask_2>" , snake_case : int="<mask_1>" , snake_case : Union[str, Any]=None , snake_case : int=1_0_3 , **snake_case : Tuple , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(snake_case , snake_case ):
raise TypeError(
f"additional_special_tokens should be of type {type(snake_case )}, but is"
f" {type(snake_case )}" )
UpperCamelCase_ : Union[str, Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(snake_case ) , self.offset - 1 )
]
if len(set(snake_case ) ) != len(snake_case ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
UpperCamelCase_ : int = additional_special_tokens_extended
else:
UpperCamelCase_ : int = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
super().__init__(
snake_case , tokenizer_file=snake_case , pad_token=snake_case , eos_token=snake_case , unk_token=snake_case , mask_token=snake_case , mask_token_sent=snake_case , offset=snake_case , additional_special_tokens=snake_case , **snake_case , )
UpperCamelCase_ : List[str] = vocab_file
UpperCamelCase_ : Optional[int] = False if not self.vocab_file else True
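# Added note (for clarity; not in the original file): Pegasus reserves the first
# `offset` ids (103 by default, per the signature above) for special tokens -- pad,
# eos, mask_token_sent (<mask_1>), mask_token (<mask_2>) and <unk_2>..<unk_102>
# fillers -- so ordinary SentencePiece pieces start at id `offset`. The list surgery
# in __init__ above just guarantees that exactly that many specials exist and are
# consistently ordered.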
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
return [1 if x in all_special_ids else 0 for x in seq]
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : List , snake_case : Optional[List] = None , snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(snake_case )
elif token_ids_a is None:
return self._special_token_mask(snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Dict , snake_case : Union[str, Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase_ : int = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
| 50 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
a_ = random.Random()
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : int=1.0 , lowerCamelCase : Optional[int]=None , lowerCamelCase : Optional[int]=None ):
if rng is None:
UpperCamelCase_ : Union[str, Any] = global_rng
UpperCamelCase_ : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowercase ( unittest.TestCase ):
def __init__( self : Optional[Any] , snake_case : Tuple , snake_case : str=7 , snake_case : Tuple=4_0_0 , snake_case : List[Any]=2_0_0_0 , snake_case : Optional[Any]=2_4 , snake_case : Tuple=2_4 , snake_case : Dict=0.0 , snake_case : Any=1_6_0_0_0 , snake_case : Tuple=True , snake_case : List[str]=True , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : int = parent
UpperCamelCase_ : int = batch_size
UpperCamelCase_ : str = min_seq_length
UpperCamelCase_ : str = max_seq_length
UpperCamelCase_ : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase_ : int = feature_size
UpperCamelCase_ : Optional[int] = num_mel_bins
UpperCamelCase_ : str = padding_value
UpperCamelCase_ : Union[str, Any] = sampling_rate
UpperCamelCase_ : Tuple = return_attention_mask
UpperCamelCase_ : List[str] = do_normalize
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Dict=False , snake_case : List[str]=False ) -> int:
"""simple docstring"""
def _flatten(snake_case : Optional[Any] ):
return list(itertools.chain(*snake_case ) )
if equal_length:
UpperCamelCase_ : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase_ : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase_ : List[str] = [np.asarray(snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowercase ( snake_case_ , unittest.TestCase ):
lowercase = SpeechaTextFeatureExtractor if is_speech_available() else None
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : List[str] = SpeechaTextFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : str ) -> Tuple:
"""simple docstring"""
self.assertTrue(np.all(np.mean(snake_case , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case , axis=0 ) - 1 ) < 1e-3 ) )
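# Added note: the helper above asserts cepstral mean/variance normalization along
# the time axis (mean ~ 0, variance ~ 1 per feature), which is what do_normalize=True
# should produce. A minimal sketch of the same property, using the numpy already
# imported in this file:
#
#     feats = np.random.rand(100, 24)
#     norm = (feats - feats.mean(0)) / np.sqrt(feats.var(0) + 1e-7)
#     # norm.mean(0) ~ 0 and norm.var(0) ~ 1, so the checks above pass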
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_ : List[Any] = [np.asarray(snake_case ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase_ : Tuple = feature_extractor(snake_case , padding=snake_case , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase_ : int = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
UpperCamelCase_ : str = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test batched
UpperCamelCase_ : Union[str, Any] = feature_extractor(snake_case , return_tensors='np' ).input_features
UpperCamelCase_ : List[str] = feature_extractor(snake_case , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase_ : int = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCamelCase_ : List[str] = np.asarray(snake_case )
UpperCamelCase_ : Any = feature_extractor(snake_case , return_tensors='np' ).input_features
UpperCamelCase_ : str = feature_extractor(snake_case , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case ):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_ : Union[str, Any] = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase_ : Tuple = [None, 1_6, None]
for max_length, padding in zip(snake_case , snake_case ):
UpperCamelCase_ : Optional[Any] = feature_extractor(
snake_case , padding=snake_case , max_length=snake_case , return_attention_mask=snake_case )
UpperCamelCase_ : List[str] = inputs.input_features
UpperCamelCase_ : List[str] = inputs.attention_mask
UpperCamelCase_ : Optional[int] = [np.sum(snake_case ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_ : List[str] = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase_ : Optional[Any] = [None, 1_6, None]
for max_length, padding in zip(snake_case , snake_case ):
UpperCamelCase_ : Any = feature_extractor(
snake_case , max_length=snake_case , padding=snake_case , return_tensors='np' , return_attention_mask=snake_case )
UpperCamelCase_ : int = inputs.input_features
UpperCamelCase_ : Optional[int] = inputs.attention_mask
UpperCamelCase_ : str = [np.sum(snake_case ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_ : str = feature_extractor(
snake_case , padding='max_length' , max_length=4 , truncation=snake_case , return_tensors='np' , return_attention_mask=snake_case , )
UpperCamelCase_ : int = inputs.input_features
UpperCamelCase_ : Union[str, Any] = inputs.attention_mask
UpperCamelCase_ : Dict = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_ : Any = feature_extractor(
snake_case , padding='longest' , max_length=4 , truncation=snake_case , return_tensors='np' , return_attention_mask=snake_case , )
UpperCamelCase_ : Dict = inputs.input_features
UpperCamelCase_ : List[Any] = inputs.attention_mask
UpperCamelCase_ : Tuple = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 2_4) )
UpperCamelCase_ : Dict = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_ : int = feature_extractor(
snake_case , padding='longest' , max_length=1_6 , truncation=snake_case , return_tensors='np' , return_attention_mask=snake_case , )
UpperCamelCase_ : Dict = inputs.input_features
UpperCamelCase_ : Union[str, Any] = inputs.attention_mask
UpperCamelCase_ : Dict = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 2_4) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
import torch
UpperCamelCase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ : Optional[Any] = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
UpperCamelCase_ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase_ : Tuple = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase_ : Tuple = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Tuple ) -> Dict:
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase_ : Optional[int] = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
UpperCamelCase_ : Optional[Any] = ds.sort('id' ).select(range(snake_case ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
"""simple docstring"""
# fmt: off
UpperCamelCase_ : Union[str, Any] = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
UpperCamelCase_ : str = self._load_datasamples(1 )
UpperCamelCase_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ : str = feature_extractor(snake_case , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 5_8_4, 2_4) )
self.assertTrue(np.allclose(input_features[0, 0, :3_0] , snake_case , atol=1e-4 ) )
| 50 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowercase: List[Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
_lowercase: Any = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode("utf-8").split()
_lowercase: Tuple = '|'.join(sys.argv[1:])
_lowercase: Any = re.compile(rF"""^({joined_dirs}).*?\.py$""")
_lowercase: List[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 227 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
lowercase__ = []
lowercase__ = []
for rt in rc.restypes:
lowercase__ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
lowercase__ = {name: i for i, name in enumerate(lowerCamelCase_ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
lowercase__ = torch.tensor(
lowerCamelCase_ , dtype=torch.intaa , device=protein['''aatype'''].device , )
lowercase__ = torch.tensor(
lowerCamelCase_ , dtype=torch.intaa , device=protein['''aatype'''].device , )
lowercase__ = torch.tensor(
lowerCamelCase_ , dtype=torch.floataa , device=protein['''aatype'''].device , )
lowercase__ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowercase__ = restype_atomaa_to_atomaa[protein_aatype]
lowercase__ = restype_atomaa_mask[protein_aatype]
lowercase__ = residx_atomaa_mask
lowercase__ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowercase__ = restype_atomaa_to_atomaa[protein_aatype]
lowercase__ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowercase__ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
lowercase__ = rc.restype_atoa[restype_letter]
lowercase__ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowercase__ = rc.atom_order[atom_name]
lowercase__ = 1
lowercase__ = restype_atomaa_mask[protein_aatype]
lowercase__ = residx_atomaa_mask
return protein
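# Added background note: atom14 is a compact per-residue layout (at most 14 heavy
# atoms, residue-type dependent) while atom37 is a fixed layout indexed by atom name;
# the tensors built above are gather indices plus masks for converting between the
# two, with the extra restype row serving as an all-zero dummy for 'UNK'.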
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tree_map(lambda lowerCamelCase_ : torch.tensor(lowerCamelCase_ , device=batch['''aatype'''].device ) , lowerCamelCase_ , np.ndarray )
lowercase__ = tensor_tree_map(lambda lowerCamelCase_ : np.array(lowerCamelCase_ ) , make_atomaa_masks(lowerCamelCase_ ) )
return out
| 207 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :List[Any] = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = '''lxmert'''
UpperCamelCase__ : Any = {}
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]=30_522 , __SCREAMING_SNAKE_CASE : Tuple=768 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : List[Any]=9_500 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_600 , __SCREAMING_SNAKE_CASE : Optional[int]=400 , __SCREAMING_SNAKE_CASE : int=3_072 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=512 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=1E-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=9 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=5 , __SCREAMING_SNAKE_CASE : Any=2_048 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Tuple=6.67 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , **__SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
__a = vocab_size
__a = hidden_size
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = num_qa_labels
__a = num_object_labels
__a = num_attr_labels
__a = l_layers
__a = x_layers
__a = r_layers
__a = visual_feat_dim
__a = visual_pos_dim
__a = visual_loss_normalizer
__a = task_matched
__a = task_mask_lm
__a = task_obj_predict
__a = task_qa
__a = visual_obj_loss
__a = visual_attr_loss
__a = visual_feat_loss
__a = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__SCREAMING_SNAKE_CASE)
| 131 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Dict = logging.get_logger(__name__)
__snake_case :List[Any] = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[str] = '''gptsan-japanese'''
UpperCamelCase__ : Dict = [
'''past_key_values''',
]
UpperCamelCase__ : Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=36_000 , __SCREAMING_SNAKE_CASE : Tuple=1_280 , __SCREAMING_SNAKE_CASE : List[Any]=1_024 , __SCREAMING_SNAKE_CASE : List[Any]=8_192 , __SCREAMING_SNAKE_CASE : str=4_096 , __SCREAMING_SNAKE_CASE : Any=128 , __SCREAMING_SNAKE_CASE : int=10 , __SCREAMING_SNAKE_CASE : Optional[int]=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : List[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=128 , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1E-5 , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : List[str]="float32" , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : int=0.0_02 , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : int=35_998 , __SCREAMING_SNAKE_CASE : Optional[int]=35_995 , __SCREAMING_SNAKE_CASE : List[str]=35_999 , **__SCREAMING_SNAKE_CASE : List[str] , ):
'''simple docstring'''
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = d_ff
__a = d_ext
__a = d_spout
__a = num_switch_layers
__a = num_ext_layers
__a = num_switch_layers + num_ext_layers
__a = num_heads
__a = num_experts
__a = expert_capacity
__a = dropout_rate
__a = layer_norm_epsilon
__a = router_bias
__a = router_jitter_noise
__a = router_dtype
__a = router_ignore_padding_tokens
__a = output_hidden_states
__a = output_attentions
__a = initializer_factor
__a = output_router_logits
__a = use_cache
super().__init__(
separator_token_id=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
| 131 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def a ( ) -> Generator[int, None, None]:
'''simple docstring'''
UpperCamelCase__ :dict[int, int] = {}
UpperCamelCase__ :Tuple = 2
while True:
UpperCamelCase__ :str = factor_map.pop(__a , __a )
if factor:
UpperCamelCase__ :List[str] = factor + prime
while x in factor_map:
x += factor
UpperCamelCase__ :Optional[Any] = factor
else:
UpperCamelCase__ :List[str] = prime
yield prime
prime += 1
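# Added clean sketch of the incremental Sieve of Eratosthenes used above, with
# readable names (illustrative only, not part of the original file): the map keeps,
# for each upcoming composite, one prime that divides it.
def _sieve_demo():
    composites = {}  # next known composite -> a prime factor of it
    n = 2
    while True:
        prime = composites.pop(n, None)
        if prime is None:
            yield n  # n carries no recorded factor, so it is prime
            composites[n * n] = n  # the first composite owed to n is n**2
        else:
            x = n + prime  # slide the factor forward to its next free multiple
            while x in composites:
                x += prime
            composites[x] = prime
        n += 1

assert [p for p, _ in zip(_sieve_demo(), range(5))] == [2, 3, 5, 7, 11]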
def a ( __a = 1e10 ) -> int:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = sieve()
UpperCamelCase__ :str = 1
while True:
UpperCamelCase__ :Optional[Any] = next(__a )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__a )
n += 2
if __name__ == "__main__":
print(solution())
| 97 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =["image_processor", "tokenizer"]
lowerCamelCase : Union[str, Any] ="LayoutLMv2ImageProcessor"
lowerCamelCase : int =("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[int] , a : Any=None , a : Any=None , **a : Union[str, Any] ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a , )
__lowerCamelCase = kwargs.pop('''feature_extractor''' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a , a )
def __call__( self : Tuple , a : Optional[int] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : Tuple , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
__lowerCamelCase = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a , a ):
__lowerCamelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowerCamelCase = features['''words''']
__lowerCamelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
__lowerCamelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__lowerCamelCase = self.get_overflowing_images(a , encoded_inputs['''overflow_to_sample_mapping'''] )
__lowerCamelCase = images
return encoded_inputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Optional[Any] , a : str ):
"""simple docstring"""
__lowerCamelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a ) != len(a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f""" {len(a )} and {len(a )}""" )
return images_with_overflow
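# Added note: with return_overflowing_tokens, one page image can map to several
# tokenizer windows; the helper above re-expands the image list through
# overflow_to_sample_mapping so images stay aligned one-to-one with the windows.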
def SCREAMING_SNAKE_CASE__ ( self : List[str] , *a : Optional[Any] , **a : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *a : Union[str, Any] , **a : Tuple ):
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a , )
return self.image_processor
| 67 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : List[Any] = 42
_UpperCAmelCase : int = None
# Automatically constructed
_UpperCAmelCase : Any = '''dict'''
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : int = field(default='''Translation''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_ )
def __call__( self : Union[str, Any]):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def _SCREAMING_SNAKE_CASE ( self : str):
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : int = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[str] = None
# Automatically constructed
_UpperCAmelCase : List[Any] = '''dict'''
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Dict = field(default='''TranslationVariableLanguages''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Any = sorted(set(self.languages)) if self.languages else None
SCREAMING_SNAKE_CASE_: Optional[int] = len(self.languages) if self.languages else None
def __call__( self : Optional[int]):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = set(self.languages)
if self.languages and set(__A) - lang_set:
raise ValueError(
F"Some languages in example ({', '.join(sorted(set(__A) - lang_set))}) are not in valid set ({', '.join(__A)}).")
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
SCREAMING_SNAKE_CASE_: Optional[int] = []
for lang, text in translation_dict.items():
if isinstance(__A , __A):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = zip(*sorted(__A))
return {"language": languages, "translation": translations}
def _SCREAMING_SNAKE_CASE ( self : int):
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
| 359 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 127 | 0 |
__UpperCAmelCase = '''Input must be a string of 8 numbers plus letter'''
__UpperCAmelCase = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def UpperCamelCase ( snake_case__ : str ) -> bool:
if not isinstance(snake_case__ , snake_case__ ):
UpperCamelCase : List[str] = F"""Expected string as input, found {type(snake_case__ ).__name__}"""
raise TypeError(snake_case__ )
UpperCamelCase : Dict = spanish_id.replace('-' , '' ).upper()
if len(snake_case__ ) != 9:
raise ValueError(snake_case__ )
try:
UpperCamelCase : List[Any] = int(spanish_id_clean[0:8] )
UpperCamelCase : str = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(snake_case__ ) from ex
if letter.isdigit():
raise ValueError(snake_case__ )
return letter == LOOKUP_LETTERS[number % 23]
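# Added worked example (illustrative, not part of the original file): for the
# well-known test DNI 12345678Z, 12345678 % 23 == 14 and position 14 of the lookup
# string is 'Z', so the ID validates.
assert 12345678 % 23 == 14 and "TRWAGMYFPDXBNJZSQVHLCKE"[14] == "Z"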
if __name__ == "__main__":
import doctest
doctest.testmod()
| 119 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : Any = num_of_nodes
UpperCamelCase : list[list[int]] = []
UpperCamelCase : dict[int, int] = {}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCamelCase : Dict = self.find_component(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
if component_size[u_node] <= component_size[v_node]:
UpperCamelCase : Tuple = v_node
component_size[v_node] += component_size[u_node]
self.set_component(SCREAMING_SNAKE_CASE_ )
elif component_size[u_node] >= component_size[v_node]:
UpperCamelCase : Union[str, Any] = self.find_component(SCREAMING_SNAKE_CASE_ )
component_size[u_node] += component_size[v_node]
self.set_component(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> None:
UpperCamelCase : int = []
UpperCamelCase : int = 0
UpperCamelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
UpperCamelCase : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = edge
UpperCamelCase : str = self.m_component[u]
UpperCamelCase : Any = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCamelCase : Any = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = edge
UpperCamelCase : List[Any] = self.m_component[u]
UpperCamelCase : Tuple = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
UpperCamelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def UpperCamelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 119 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCAmelCase_( UpperCAmelCase__ ):
__lowercase : Optional[Any] = 'speech_to_text'
__lowercase : Any = ['past_key_values']
__lowercase : Optional[int] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self ,__UpperCAmelCase=1_0000 ,__UpperCAmelCase=12 ,__UpperCAmelCase=2048 ,__UpperCAmelCase=4 ,__UpperCAmelCase=6 ,__UpperCAmelCase=2048 ,__UpperCAmelCase=4 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase="relu" ,__UpperCAmelCase=256 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=1 ,__UpperCAmelCase=0 ,__UpperCAmelCase=2 ,__UpperCAmelCase=6000 ,__UpperCAmelCase=1024 ,__UpperCAmelCase=2 ,__UpperCAmelCase=(5, 5) ,__UpperCAmelCase=1024 ,__UpperCAmelCase=80 ,__UpperCAmelCase=1 ,**__UpperCAmelCase ,) -> Optional[Any]:
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Union[str, Any] = d_model
lowerCAmelCase__ : Optional[Any] = encoder_ffn_dim
lowerCAmelCase__ : Dict = encoder_layers
lowerCAmelCase__ : int = encoder_attention_heads
lowerCAmelCase__ : List[Any] = decoder_ffn_dim
lowerCAmelCase__ : List[str] = decoder_layers
lowerCAmelCase__ : str = decoder_attention_heads
lowerCAmelCase__ : Union[str, Any] = dropout
lowerCAmelCase__ : Union[str, Any] = attention_dropout
lowerCAmelCase__ : Optional[int] = activation_dropout
lowerCAmelCase__ : Dict = activation_function
lowerCAmelCase__ : Union[str, Any] = init_std
lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop
lowerCAmelCase__ : List[Any] = decoder_layerdrop
lowerCAmelCase__ : List[str] = use_cache
lowerCAmelCase__ : List[str] = encoder_layers
lowerCAmelCase__ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase__ : Tuple = max_source_positions
lowerCAmelCase__ : Dict = max_target_positions
lowerCAmelCase__ : Tuple = num_conv_layers
lowerCAmelCase__ : Tuple = list(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = conv_channels
lowerCAmelCase__ : Optional[Any] = input_feat_per_channel
lowerCAmelCase__ : Optional[Any] = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,is_encoder_decoder=_SCREAMING_SNAKE_CASE ,decoder_start_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
| 360 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCAmelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=7 ,__UpperCAmelCase=3 ,__UpperCAmelCase=18 ,__UpperCAmelCase=30 ,__UpperCAmelCase=400 ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,) -> Optional[Any]:
lowerCAmelCase__ : Optional[Any] = size if size is not None else {"""height""": 20, """width""": 20}
lowerCAmelCase__ : List[Any] = parent
lowerCAmelCase__ : List[str] = batch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : Optional[int] = image_size
lowerCAmelCase__ : Optional[Any] = min_resolution
lowerCAmelCase__ : Tuple = max_resolution
lowerCAmelCase__ : List[Any] = size
lowerCAmelCase__ : List[str] = do_normalize
lowerCAmelCase__ : Optional[int] = do_convert_rgb
lowerCAmelCase__ : str = [512, 1024, 2048, 4096]
lowerCAmelCase__ : int = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
def UpperCAmelCase_ ( self ) -> Optional[int]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Optional[Any] = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
lowerCAmelCase__ : Union[str, Any] = Image.open(requests.get(__UpperCAmelCase ,stream=__UpperCAmelCase ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Union[str, Any] = PixaStructImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : List[Any] = self.image_processor_tester.prepare_dummy_image()
lowerCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
lowerCAmelCase__ : str = 2048
lowerCAmelCase__ : Tuple = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() ,torch.tensor(0.0_6_0_6 ) ,atol=1E-3 ,rtol=1E-3 ) )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processor
lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,Image.Image )
# Test not batched input
lowerCAmelCase__ : List[str] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
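# Added note: Pix2Struct flattens every image patch into height*width*channels
# pixel values and prepends two positional entries (the patch's row and column
# index), hence the "+ 2" in expected_hidden_dim above.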
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCAmelCase__ : str = image_processor(
image_inputs[0] ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
lowerCAmelCase__ : Any = image_processor(
__UpperCAmelCase ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
def UpperCAmelCase_ ( self ) -> Any:
# Initialize image_processor
lowerCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,Image.Image )
# Test not batched input
lowerCAmelCase__ : Tuple = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
lowerCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = image_processor(
image_inputs[0] ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ).flattened_patches
lowerCAmelCase__ : Optional[Any] = """Hello"""
lowerCAmelCase__ : List[str] = image_processor(
image_inputs[0] ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ,header_text=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
lowerCAmelCase__ : str = image_processor(
__UpperCAmelCase ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ,header_text=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processor
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase ,numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,np.ndarray )
lowerCAmelCase__ : Tuple = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCAmelCase__ : Any = image_processor(
image_inputs[0] ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
lowerCAmelCase__ : int = image_processor(
__UpperCAmelCase ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processor
lowerCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase ,torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,torch.Tensor )
# Test not batched input
lowerCAmelCase__ : Optional[int] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCAmelCase__ : Dict = image_processor(
image_inputs[0] ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
lowerCAmelCase__ : Union[str, Any] = image_processor(
__UpperCAmelCase ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : Dict = PixaStructImageProcessingTester(self ,num_channels=4 )
lowerCAmelCase__ : str = 3
@property
def UpperCAmelCase_ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
# Initialize image_processor
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,Image.Image )
# Test not batched input
lowerCAmelCase__ : Dict = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCAmelCase__ : int = image_processor(
image_inputs[0] ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
lowerCAmelCase__ : Dict = image_processor(
__UpperCAmelCase ,return_tensors="""pt""" ,max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
| 184 | 0 |
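For reference, the `expected_hidden_dim` arithmetic repeated in the Pix2Struct tests above unpacks as: flattened patch width = patch height × patch width × channels, plus two slots for the patch's row and column index. A minimal sketch under the tester defaults shown above (16×16 patches, 3 channels):

```python
# Hedged sketch of the expected flattened-patch width used in the tests above.
# Assumes the tester defaults (16x16 patches, 3 channels); the trailing +2
# accounts for the row and column index stored with each flattened patch.
patch_height, patch_width, num_channels = 16, 16, 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2
assert expected_hidden_dim == 770
```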
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("Input must be an integer" )
if input_num <= 0:
raise ValueError("Input must be positive" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57 |
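As a usage sketch for the proper-divisor sum in the row above: a positive integer is perfect exactly when it equals the sum of its proper divisors. The helper name below is hypothetical and simply restates the obfuscated function:

```python
# Hedged usage sketch; `sum_of_proper_divisors` is a hypothetical name that
# restates the obfuscated function in the row above.
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(d for d in range(1, input_num // 2 + 1) if input_num % d == 0)

def is_perfect(input_num: int) -> bool:
    # 6 -> 1 + 2 + 3, 28 -> 1 + 2 + 4 + 7 + 14
    return sum_of_proper_divisors(input_num) == input_num

assert is_perfect(6) and is_perfect(28) and not is_perfect(12)
```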
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Optional[int] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase__ : Union[str, Any] ={'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase__ : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 118 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _A , _A=12 , _A=7 , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=32 , _A=2 , _A=4 , _A=37 , _A=0.1 , _A=0.1 , _A=512 , _A=0.0_2 , _A=0 , _A=None , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = projection_dim
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = bos_token_id
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__SCREAMING_SNAKE_CASE = input_mask.numpy()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = input_mask.shape
__SCREAMING_SNAKE_CASE = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def _A ( self ):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _A ( self , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFBlipTextModel(config=_A )
__SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , training=_A )
__SCREAMING_SNAKE_CASE = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : str = (TFBlipTextModel,) if is_tf_available() else ()
UpperCamelCase__ : int = False
UpperCamelCase__ : Optional[Any] = False
UpperCamelCase__ : Tuple = False
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BlipTextModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A , hidden_size=37 )
def _A ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _A ( self ):
'''simple docstring'''
pass
def _A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _A ( self ):
'''simple docstring'''
pass
@slow
def _A ( self ):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _A ( self , _A=True ):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 118 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = GPTSanJapaneseTokenizer
lowercase = False
lowercase = {"do_clean_text": False, "add_prefix_space": False}
def lowerCamelCase ( self : str ):
super().setUp()
# fmt: off
snake_case__ : Optional[Any] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(snake_case_ ) )
def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase ( self : Any , snake_case_ : str ):
snake_case__ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def lowerCamelCase ( self : Any , snake_case_ : Dict ):
snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ )
snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
return text, ids
def lowerCamelCase ( self : Optional[Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Union[str, Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : List[str] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Dict ):
snake_case__ : Optional[Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : int = """こんにちは、世界。 こんばんは、㔺界。"""
snake_case__ : Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
snake_case__ : Dict = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids without special tokens
snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids with special tokens
snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token]
snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : Union[str, Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
snake_case__ : Optional[int] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
snake_case__ : Any = tokenizer.encode(snake_case_ )
snake_case__ : int = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Tuple = """こんにちは、世界。"""
snake_case__ : Optional[Any] = """こんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。こんばんは、世界。😀"""
snake_case__ : Dict = tokenizer.encode(prefix_text + input_text )
snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ )
snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ )
snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ )
snake_case__ : str = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Dict = """こんにちは、世界。"""
snake_case__ : Optional[int] = """こんばんは、㔺界。😀"""
snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1)
snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids
snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
snake_case__ : Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : Union[str, Any] = tokenizer.encode("""あンいワ""" )
snake_case__ : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
snake_case__ : Dict = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase ( self : Any ):
snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ )
snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ )
# fmt: off
snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , snake_case_ )
self.assertListEqual(x_token.token_type_ids , snake_case_ )
self.assertListEqual(x_token.attention_mask , snake_case_ )
self.assertListEqual(x_token_a.input_ids , snake_case_ )
self.assertListEqual(x_token_a.token_type_ids , snake_case_ )
self.assertListEqual(x_token_a.attention_mask , snake_case_ )
def lowerCamelCase ( self : Any ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase ( self : List[str] ):
# tokenizer has no padding token
pass
| 35 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if "model" in orig_key:
UpperCAmelCase = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
UpperCAmelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
UpperCAmelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
UpperCAmelCase = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
UpperCAmelCase = orig_key.split('''.''' )[0].split('''_''' )[-1]
UpperCAmelCase = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
UpperCAmelCase = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
UpperCAmelCase = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
UpperCAmelCase = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
UpperCAmelCase = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
UpperCAmelCase = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
UpperCAmelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
UpperCAmelCase = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
UpperCAmelCase = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
UpperCAmelCase = '''yoso.''' + orig_key
return orig_key
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCAmelCase = val
UpperCAmelCase = orig_state_dict['''cls.predictions.decoder.bias''']
UpperCAmelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model_state_dict''']
UpperCAmelCase = YosoConfig.from_json_file(UpperCamelCase__ )
UpperCAmelCase = YosoForMaskedLM(UpperCamelCase__ )
UpperCAmelCase = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase__ )
print(model.load_state_dict(UpperCamelCase__ ) )
model.eval()
model.save_pretrained(UpperCamelCase__ )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273 | 0 |
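To make the key-renaming chain in the YOSO conversion above concrete, a minimal sketch covering a subset of the replacements (the sample key is illustrative):

```python
# Hedged sketch of the checkpoint key renaming above; only a subset of the
# replacement chain is shown, and the sample key is illustrative.
def rename_yoso_key(orig_key: str) -> str:
    orig_key = orig_key.replace("model.", "")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" not in orig_key:  # plain "mha" maps to the attention block
        orig_key = orig_key.replace("mha", "attention")
    orig_key = orig_key.replace("W_q", "self.query")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key

assert (
    rename_yoso_key("model.transformer_0.mha.W_q.weight")
    == "yoso.encoder.layer.0.attention.self.query.weight"
)
```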
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __lowerCAmelCase ( a__ , a__=1 ) -> Any:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def __lowerCAmelCase ( a__ , a__=0 ) -> Any:
__a = []
for old_item in old_list:
__a = old_item.replace('''in_layers.0''' , '''norm1''' )
__a = new_item.replace('''in_layers.2''' , '''conv1''' )
__a = new_item.replace('''out_layers.0''' , '''norm2''' )
__a = new_item.replace('''out_layers.3''' , '''conv2''' )
__a = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
__a = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
__a = shave_segments(a__ , n_shave_prefix_segments=a__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def __lowerCAmelCase ( a__ , a__=0 ) -> Tuple:
__a = []
for old_item in old_list:
__a = old_item
__a = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
__a = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
__a = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
__a = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
__a = shave_segments(a__ , n_shave_prefix_segments=a__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def __lowerCAmelCase ( a__ , a__ , a__ , a__=None , a__=None , a__=None ) -> str:
assert isinstance(a__ , a__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
__a = old_checkpoint[path]
__a = old_tensor.shape[0] // 3
__a = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
__a = old_tensor.shape[0] // config['''num_head_channels'''] // 3
__a = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
__a , __a , __a = old_tensor.split(channels // num_heads , dim=1 )
__a = query.reshape(a__ )
__a = key.reshape(a__ )
__a = value.reshape(a__ )
for path in paths:
__a = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
__a = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
__a = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
__a = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
__a = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
__a = old_checkpoint[path['''old''']][:, :, 0]
else:
__a = old_checkpoint[path['''old''']]
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = {}
__a = checkpoint['''time_embed.0.weight''']
__a = checkpoint['''time_embed.0.bias''']
__a = checkpoint['''time_embed.2.weight''']
__a = checkpoint['''time_embed.2.bias''']
__a = checkpoint['''input_blocks.0.0.weight''']
__a = checkpoint['''input_blocks.0.0.bias''']
__a = checkpoint['''out.0.weight''']
__a = checkpoint['''out.0.bias''']
__a = checkpoint['''out.2.weight''']
__a = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
__a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
__a = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(a__ )
}
# Retrieves the keys for the middle blocks only
__a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
__a = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(a__ )
}
# Retrieves the keys for the output blocks only
__a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
__a = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(a__ )
}
for i in range(1 , a__ ):
__a = (i - 1) // (config['''num_res_blocks'''] + 1)
__a = (i - 1) % (config['''num_res_blocks'''] + 1)
__a = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
__a = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
__a = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
__a = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
__a = renew_resnet_paths(a__ )
__a = {'''old''': F"""input_blocks.{i}.0""", '''new''': F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
__a = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
a__ , a__ , a__ , additional_replacements=[meta_path, resnet_op] , config=a__ )
if len(a__ ):
__a = renew_attention_paths(a__ )
__a = {
'''old''': F"""input_blocks.{i}.1""",
'''new''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__a = {
F"""input_blocks.{i}.1.qkv.bias""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
a__ , a__ , a__ , additional_replacements=[meta_path] , attention_paths_to_split=a__ , config=a__ , )
__a = middle_blocks[0]
__a = middle_blocks[1]
__a = middle_blocks[2]
__a = renew_resnet_paths(a__ )
assign_to_checkpoint(a__ , a__ , a__ , config=a__ )
__a = renew_resnet_paths(a__ )
assign_to_checkpoint(a__ , a__ , a__ , config=a__ )
__a = renew_attention_paths(a__ )
__a = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
a__ , a__ , a__ , attention_paths_to_split=a__ , config=a__ )
for i in range(a__ ):
__a = i // (config['''num_res_blocks'''] + 1)
__a = i % (config['''num_res_blocks'''] + 1)
__a = [shave_segments(a__ , 2 ) for name in output_blocks[i]]
__a = {}
for layer in output_block_layers:
__a , __a = layer.split('''.''' )[0], shave_segments(a__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(a__ )
else:
__a = [layer_name]
if len(a__ ) > 1:
__a = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
__a = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
__a = renew_resnet_paths(a__ )
__a = renew_resnet_paths(a__ )
__a = {'''old''': F"""output_blocks.{i}.0""", '''new''': F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(a__ , a__ , a__ , additional_replacements=[meta_path] , config=a__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
__a = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
__a = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
__a = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(a__ ) == 2:
__a = []
if len(a__ ):
__a = renew_attention_paths(a__ )
__a = {
'''old''': F"""output_blocks.{i}.1""",
'''new''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__a = {
F"""output_blocks.{i}.1.qkv.bias""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
a__ , a__ , a__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=a__ , )
else:
__a = renew_resnet_paths(a__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
__a = '''.'''.join(['''output_blocks''', str(a__ ), path['''old''']] )
__a = '''.'''.join(['''up_blocks''', str(a__ ), '''resnets''', str(a__ ), path['''new''']] )
__a = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
A : str = parser.parse_args()
A : int = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A : int = json.loads(f.read())
A : int = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A : str = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A : Optional[Any] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
A : Any = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
A : List[str] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : int = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ['MobileViTFeatureExtractor']
A : str = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33 | 1 |
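The import-structure rows above (AltCLIP, tapex, MobileViT) all follow the same lazy-loading pattern: submodules listed in `_import_structure` are imported only on first attribute access. A minimal standalone sketch of the idea via PEP 562's module-level `__getattr__`; this illustrates the pattern and is not the library's actual `_LazyModule` implementation:

```python
# Hedged sketch of the lazy-import pattern above (PEP 562 module-level
# __getattr__); illustrative only, not transformers' actual _LazyModule.
import importlib

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
_name_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}

def __getattr__(name):
    # Import the owning submodule only when the attribute is first requested.
    if name in _name_to_module:
        submodule = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```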
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ : List[str] = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[Any] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int ) -> bool:
    _a = round(lowercase ** (1 / 3) )  # round the float root; the raw root of an exact cube is inexact
    return (_a * _a * _a) == lowercase
if __name__ == "__main__":
    print(_lowerCamelCase(27))  # True
    print(_lowerCamelCase(4))  # False
| 63 | 1 |
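The rounding in the cube check above matters because raw float cube roots are inexact: on most platforms `27 ** (1 / 3)` evaluates to about `3.0000000000000004`, so cubing the unrounded root fails the equality test for an exact cube. A minimal sketch of the robust check for non-negative integers:

```python
# Hedged sketch: why the cube check rounds the float root first.
# 27 ** (1 / 3) is ~3.0000000000000004, so comparing the cube of the raw
# float root against n misclassifies exact cubes. Assumes n >= 0.
def perfect_cube(n: int) -> bool:
    root = round(n ** (1 / 3))
    return root * root * root == n

assert perfect_cube(27) and perfect_cube(64) and not perfect_cube(4)
```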
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ : Any = params
snake_case_ : Dict = np.array(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = np.array([len(_SCREAMING_SNAKE_CASE ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> int:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Optional[Any]:
return len(self.lengths )
def _lowerCAmelCase ( self ) -> int:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : Optional[int] = self.params.max_model_input_size
snake_case_ : str = self.lengths > max_len
logger.info(f'''Splitting {sum(_SCREAMING_SNAKE_CASE )} too long sequences.''' )
def divide_chunks(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return [l[i : i + n] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )]
snake_case_ : Optional[Any] = []
snake_case_ : Optional[int] = []
if self.params.mlm:
snake_case_ , snake_case_ : Union[str, Any] = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
snake_case_ , snake_case_ : Any = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
snake_case_ : List[Any] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
snake_case_ : Optional[Any] = np.insert(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
if sub_s[-1] != sep_id:
snake_case_ : Optional[int] = np.insert(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_SCREAMING_SNAKE_CASE )
new_tok_ids.extend(_SCREAMING_SNAKE_CASE )
new_lengths.extend([len(_SCREAMING_SNAKE_CASE ) for l in sub_seqs] )
snake_case_ : Union[str, Any] = np.array(_SCREAMING_SNAKE_CASE )
snake_case_ : Any = np.array(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Union[str, Any] = len(self )
snake_case_ : List[Any] = self.lengths > 11
snake_case_ : Dict = self.token_ids[indices]
snake_case_ : Any = self.lengths[indices]
snake_case_ : Optional[int] = len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _lowerCAmelCase ( self ) -> int:
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case_ : Optional[Any] = self.params.special_tok_ids["unk_token"]
snake_case_ : Tuple = len(self )
snake_case_ : int = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
snake_case_ : Optional[Any] = (unk_occs / self.lengths) < 0.5
snake_case_ : List[str] = self.token_ids[indices]
snake_case_ : Dict = self.lengths[indices]
snake_case_ : Union[str, Any] = len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _lowerCAmelCase ( self ) -> Any:
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ : int = [t[0] for t in batch]
snake_case_ : Tuple = [t[1] for t in batch]
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
# Max for paddings
snake_case_ : Optional[Any] = max(_SCREAMING_SNAKE_CASE )
# Pad token ids
if self.params.mlm:
snake_case_ : Union[str, Any] = self.params.special_tok_ids["pad_token"]
else:
snake_case_ : List[Any] = self.params.special_tok_ids["unk_token"]
snake_case_ : Optional[int] = [list(t.astype(_SCREAMING_SNAKE_CASE ) ) + [pad_idx] * (max_seq_len_ - len(_SCREAMING_SNAKE_CASE )) for t in token_ids]
assert len(tk_ ) == len(_SCREAMING_SNAKE_CASE )
assert all(len(_SCREAMING_SNAKE_CASE ) == max_seq_len_ for t in tk_ )
snake_case_ : Tuple = torch.tensor(tk_ ) # (bs, max_seq_len_)
snake_case_ : Any = torch.tensor(_SCREAMING_SNAKE_CASE ) # (bs)
return tk_t, lg_t
| 36 |
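The subtle step in the dataset class above is the long-sequence handling: each over-length sequence is split into windows of `max_len - 2` tokens, and the CLS/SEP (or BOS/EOS) markers are re-attached to every window. A minimal sketch with illustrative token ids:

```python
# Hedged sketch of the long-sequence splitting above; token ids are illustrative.
import numpy as np

cls_id, sep_id, max_len = 101, 102, 6
seq = np.array([101, 5, 6, 7, 8, 9, 10, 11, 102])  # 9 tokens, longer than max_len

def divide_chunks(l, n):
    return [l[i : i + n] for i in range(0, len(l), n)]

sub_seqs = []
for sub_s in divide_chunks(seq, max_len - 2):  # windows of 4 raw tokens
    if sub_s[0] != cls_id:
        sub_s = np.insert(sub_s, 0, cls_id)
    if sub_s[-1] != sep_id:
        sub_s = np.insert(sub_s, len(sub_s), sep_id)
    sub_seqs.append(sub_s)

# Every window is bracketed by CLS/SEP and stays within max_len tokens.
assert all(len(s) <= max_len and s[0] == cls_id and s[-1] == sep_id for s in sub_seqs)
```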
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : List[Any] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Tuple = 'conditional_detr'
A : Optional[int] = ['past_key_values']
A : List[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=300 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="sine" , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.25 , **_SCREAMING_SNAKE_CASE , ) -> str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case_ : List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : Optional[int] = backbone_config.get("model_type" )
snake_case_ : str = CONFIG_MAPPING[backbone_model_type]
snake_case_ : Tuple = config_class.from_dict(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = use_timm_backbone
snake_case_ : Optional[Any] = backbone_config
snake_case_ : str = num_channels
snake_case_ : Optional[Any] = num_queries
snake_case_ : Optional[Any] = d_model
snake_case_ : Optional[Any] = encoder_ffn_dim
snake_case_ : str = encoder_layers
snake_case_ : int = encoder_attention_heads
snake_case_ : int = decoder_ffn_dim
snake_case_ : Optional[Any] = decoder_layers
snake_case_ : List[str] = decoder_attention_heads
snake_case_ : List[str] = dropout
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Tuple = activation_dropout
snake_case_ : List[Any] = activation_function
snake_case_ : Dict = init_std
snake_case_ : str = init_xavier_std
snake_case_ : Tuple = encoder_layerdrop
snake_case_ : int = decoder_layerdrop
snake_case_ : List[Any] = encoder_layers
snake_case_ : int = auxiliary_loss
snake_case_ : int = position_embedding_type
snake_case_ : List[str] = backbone
snake_case_ : Union[str, Any] = use_pretrained_backbone
snake_case_ : Optional[Any] = dilation
# Hungarian matcher
snake_case_ : Tuple = class_cost
snake_case_ : Tuple = bbox_cost
snake_case_ : str = giou_cost
# Loss coefficients
snake_case_ : Union[str, Any] = mask_loss_coefficient
snake_case_ : Tuple = dice_loss_coefficient
snake_case_ : List[str] = cls_loss_coefficient
snake_case_ : List[str] = bbox_loss_coefficient
snake_case_ : List[str] = giou_loss_coefficient
snake_case_ : Any = focal_alpha
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def _lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self ) -> int:
return self.d_model
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : List[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
snake_case_ : Optional[int] = self.backbone_config.to_dict()
snake_case_ : Optional[int] = self.__class__.model_type
return output
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Union[str, Any] = version.parse('1.11' )
@property
def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCAmelCase ( self ) -> float:
return 1e-5
@property
def _lowerCAmelCase ( self ) -> int:
return 12
| 36 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCAmelCase__ : Dict =re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
lowerCAmelCase__ : List[str] =None
def __lowercase ( ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=a__ , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=a__ , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __lowercase ( a__ ) -> Dict:
__SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__SCREAMING_SNAKE_CASE = bool(qa['answers']['text'] )
return qid_to_has_ans
def __lowercase ( a__ ) -> Dict:
def remove_articles(a__ ):
return ARTICLES_REGEX.sub(' ' , a__ )
def white_space_fix(a__ ):
return " ".join(text.split() )
def remove_punc(a__ ):
__SCREAMING_SNAKE_CASE = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(a__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a__ ) ) ) )
def __lowercase ( a__ ) -> Any:
if not s:
return []
return normalize_answer(a__ ).split()
def __lowercase ( a__ , a__ ) -> str:
return int(normalize_answer(a__ ) == normalize_answer(a__ ) )
def __lowercase ( a__ , a__ ) -> Dict:
__SCREAMING_SNAKE_CASE = get_tokens(a__ )
__SCREAMING_SNAKE_CASE = get_tokens(a__ )
__SCREAMING_SNAKE_CASE = collections.Counter(a__ ) & collections.Counter(a__ )
__SCREAMING_SNAKE_CASE = sum(common.values() )
if len(a__ ) == 0 or len(a__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__SCREAMING_SNAKE_CASE = 1.0 * num_same / len(a__ )
__SCREAMING_SNAKE_CASE = 1.0 * num_same / len(a__ )
__SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall)
return fa
def __lowercase ( a__ , a__ ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__SCREAMING_SNAKE_CASE = qa['id']
__SCREAMING_SNAKE_CASE = [t for t in qa['answers']['text'] if normalize_answer(a__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__SCREAMING_SNAKE_CASE = ['']
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
__SCREAMING_SNAKE_CASE = preds[qid]
# Take max over all gold answers
__SCREAMING_SNAKE_CASE = max(compute_exact(a__ , a__ ) for a in gold_answers )
__SCREAMING_SNAKE_CASE = max(compute_fa(a__ , a__ ) for a in gold_answers )
return exact_scores, fa_scores
def __lowercase ( a__ , a__ , a__ , a__ ) -> Dict:
__SCREAMING_SNAKE_CASE = {}
for qid, s in scores.items():
__SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh
if pred_na:
__SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid] )
else:
__SCREAMING_SNAKE_CASE = s
return new_scores
def __lowercase ( a__ , a__ , a__=None ) -> str:
if not qid_list:
__SCREAMING_SNAKE_CASE = len(a__ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
__SCREAMING_SNAKE_CASE = len(a__ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def __lowercase ( a__ , a__ , a__ ) -> Optional[Any]:
for k in new_eval:
__SCREAMING_SNAKE_CASE = new_eval[k]
def __lowercase ( a__ , a__ , a__ , a__ ) -> List[str]:
plt.step(a__ , a__ , color='b' , alpha=0.2 , where='post' )
plt.fill_between(a__ , a__ , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(a__ )
plt.savefig(a__ )
plt.clf()
def __lowercase ( a__ , a__ , a__ , a__ , a__=None , a__=None ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = sorted(a__ , key=lambda a__ : na_probs[k] )
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = 1.0
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = [1.0]
__SCREAMING_SNAKE_CASE = [0.0]
__SCREAMING_SNAKE_CASE = 0.0
for i, qid in enumerate(a__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__SCREAMING_SNAKE_CASE = true_pos / float(i + 1 )
__SCREAMING_SNAKE_CASE = true_pos / float(a__ )
if i == len(a__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(a__ )
recalls.append(a__ )
if out_image:
plot_pr_curve(a__ , a__ , a__ , a__ )
return {"ap": 100.0 * avg_prec}
def __lowercase ( a__ , a__ , a__ , a__ , a__ , a__ ) -> Any:
if out_image_dir and not os.path.exists(a__ ):
os.makedirs(a__ )
__SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__SCREAMING_SNAKE_CASE = make_precision_recall_eval(
a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
__SCREAMING_SNAKE_CASE = make_precision_recall_eval(
a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
__SCREAMING_SNAKE_CASE = {k: float(a__ ) for k, v in qid_to_has_ans.items()}
__SCREAMING_SNAKE_CASE = make_precision_recall_eval(
a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(a__ , a__ , 'pr_exact' )
merge_eval(a__ , a__ , 'pr_f1' )
merge_eval(a__ , a__ , 'pr_oracle' )
def __lowercase ( a__ , a__ , a__ , a__ ) -> str:
if not qid_list:
return
__SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list]
__SCREAMING_SNAKE_CASE = np.ones_like(a__ ) / float(len(a__ ) )
plt.hist(a__ , weights=a__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(a__ , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def __lowercase ( a__ , a__ , a__ , a__ ) -> List[Any]:
__SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__SCREAMING_SNAKE_CASE = num_no_ans
__SCREAMING_SNAKE_CASE = cur_score
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = sorted(a__ , key=lambda a__ : na_probs[k] )
for i, qid in enumerate(a__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__SCREAMING_SNAKE_CASE = scores[qid]
else:
if preds[qid]:
__SCREAMING_SNAKE_CASE = -1
else:
__SCREAMING_SNAKE_CASE = 0
cur_score += diff
if cur_score > best_score:
__SCREAMING_SNAKE_CASE = cur_score
__SCREAMING_SNAKE_CASE = na_probs[qid]
return 100.0 * best_score / len(a__ ), best_thresh
def __lowercase ( a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = find_best_thresh(a__ , a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = find_best_thresh(a__ , a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE = best_exact
__SCREAMING_SNAKE_CASE = exact_thresh
__SCREAMING_SNAKE_CASE = best_fa
__SCREAMING_SNAKE_CASE = fa_thresh
def __lowercase ( ) -> Union[str, Any]:
with open(OPTS.data_file ) as f:
__SCREAMING_SNAKE_CASE = json.load(a__ )
__SCREAMING_SNAKE_CASE = dataset_json['data']
with open(OPTS.pred_file ) as f:
__SCREAMING_SNAKE_CASE = json.load(a__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__SCREAMING_SNAKE_CASE = json.load(a__ )
else:
__SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds}
__SCREAMING_SNAKE_CASE = make_qid_to_has_ans(a__ ) # maps qid to True/False
__SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v]
__SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_raw_scores(a__ , a__ )
__SCREAMING_SNAKE_CASE = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
__SCREAMING_SNAKE_CASE = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
__SCREAMING_SNAKE_CASE = make_eval_dict(a__ , a__ )
if has_ans_qids:
__SCREAMING_SNAKE_CASE = make_eval_dict(a__ , a__ , qid_list=a__ )
merge_eval(a__ , a__ , 'HasAns' )
if no_ans_qids:
__SCREAMING_SNAKE_CASE = make_eval_dict(a__ , a__ , qid_list=a__ )
merge_eval(a__ , a__ , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(a__ , a__ , a__ , a__ , a__ , a__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(a__ , a__ , a__ , a__ , a__ , OPTS.out_image_dir )
histogram_na_prob(a__ , a__ , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(a__ , a__ , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(a__ , a__ )
else:
print(json.dumps(a__ , indent=2 ) )
if __name__ == "__main__":
lowerCAmelCase__ : Optional[Any] =parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 257 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : str ={
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Dict = '''unispeech-sat'''
def __init__( self , _A=32 , _A=768 , _A=12 , _A=12 , _A=3_072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.1 , _A=0.1 , _A=0.0_2 , _A=1e-5 , _A="group" , _A="gelu" , _A=(512, 512, 512, 512, 512, 512, 512) , _A=(5, 2, 2, 2, 2, 2, 2) , _A=(10, 3, 3, 3, 3, 2, 2) , _A=False , _A=128 , _A=16 , _A=False , _A=True , _A=0.0_5 , _A=10 , _A=2 , _A=0.0 , _A=10 , _A=0 , _A=320 , _A=2 , _A=0.1 , _A=100 , _A=256 , _A=256 , _A=0.1 , _A="mean" , _A=False , _A=False , _A=256 , _A=(512, 512, 512, 512, 1_500) , _A=(5, 3, 3, 1, 1) , _A=(1, 2, 3, 1, 1) , _A=512 , _A=0 , _A=1 , _A=2 , _A=504 , **_A , ):
'''simple docstring'''
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = num_clusters
__SCREAMING_SNAKE_CASE = do_stable_layer_norm
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__SCREAMING_SNAKE_CASE = num_codevectors_per_group
__SCREAMING_SNAKE_CASE = num_codevector_groups
__SCREAMING_SNAKE_CASE = contrastive_logits_temperature
__SCREAMING_SNAKE_CASE = feat_quantizer_dropout
__SCREAMING_SNAKE_CASE = num_negatives
__SCREAMING_SNAKE_CASE = codevector_dim
__SCREAMING_SNAKE_CASE = proj_codevector_dim
__SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def _A ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
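# Sanity check for the property above: with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples the waveform by
# 5 * 2**6 = 320, i.e. one feature frame per 20 ms of (typically 16 kHz) audio.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320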
| 257 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE:
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=13 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=[32, 64, 1_28] ,SCREAMING_SNAKE_CASE__=[1, 2, 1] ,SCREAMING_SNAKE_CASE__=[2, 2, 4] ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=2.0 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-5 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=10 ,SCREAMING_SNAKE_CASE__=8 ,SCREAMING_SNAKE_CASE__=["stage1", "stage2"] ,SCREAMING_SNAKE_CASE__=[1, 2] ,) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = parent
__SCREAMING_SNAKE_CASE :List[str] = batch_size
__SCREAMING_SNAKE_CASE :int = image_size
__SCREAMING_SNAKE_CASE :List[str] = patch_size
__SCREAMING_SNAKE_CASE :Dict = num_channels
__SCREAMING_SNAKE_CASE :List[str] = embed_dim
__SCREAMING_SNAKE_CASE :Optional[int] = hidden_sizes
__SCREAMING_SNAKE_CASE :Tuple = depths
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_heads
__SCREAMING_SNAKE_CASE :List[Any] = window_size
__SCREAMING_SNAKE_CASE :Optional[Any] = mlp_ratio
__SCREAMING_SNAKE_CASE :Union[str, Any] = qkv_bias
__SCREAMING_SNAKE_CASE :Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :Union[str, Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :Optional[int] = drop_path_rate
__SCREAMING_SNAKE_CASE :Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE :Tuple = use_absolute_embeddings
__SCREAMING_SNAKE_CASE :Tuple = patch_norm
__SCREAMING_SNAKE_CASE :List[Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE :str = initializer_range
__SCREAMING_SNAKE_CASE :List[str] = is_training
__SCREAMING_SNAKE_CASE :Any = scope
__SCREAMING_SNAKE_CASE :Any = use_labels
__SCREAMING_SNAKE_CASE :Optional[int] = type_sequence_label_size
__SCREAMING_SNAKE_CASE :Any = encoder_stride
__SCREAMING_SNAKE_CASE :Optional[Any] = out_features
__SCREAMING_SNAKE_CASE :int = out_indices
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE :Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE :str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE :List[str] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
return FocalNetConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = FocalNetModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__SCREAMING_SNAKE_CASE :str = model(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__SCREAMING_SNAKE_CASE :Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = FocalNetBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__SCREAMING_SNAKE_CASE :List[Any] = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__SCREAMING_SNAKE_CASE :Optional[Any] = None
__SCREAMING_SNAKE_CASE :Optional[int] = FocalNetBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__SCREAMING_SNAKE_CASE :List[str] = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = FocalNetForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__SCREAMING_SNAKE_CASE :List[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE :Tuple = 1
__SCREAMING_SNAKE_CASE :Any = FocalNetForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__SCREAMING_SNAKE_CASE :List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE :Optional[int] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE :List[str] = FocalNetForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__SCREAMING_SNAKE_CASE :List[str] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE :List[str] = 1
__SCREAMING_SNAKE_CASE :Optional[int] = FocalNetForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__SCREAMING_SNAKE_CASE :Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE :Dict = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = config_and_inputs
__SCREAMING_SNAKE_CASE :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
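# Arithmetic sketch of the shape checks in create_and_check_model above, using
# this tester's defaults (image_size=32, patch_size=2, depths=[1, 2, 1],
# embed_dim=16); every stage after the first quarters the spatial grid and
# doubles the channel width:
_img, _patch, _stages, _embed = 32, 2, 3, 16
_seq_len = ((_img // _patch) ** 2) // (4 ** (_stages - 1))  # 256 // 16 = 16
_dim = int(_embed * 2 ** (_stages - 1))                     # 16 * 4 = 64
# so last_hidden_state is checked against (batch_size, 16, 64)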
@require_torch
class _SCREAMING_SNAKE_CASE( A , A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : int = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : int = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = FocalNetModelTester(self )
__SCREAMING_SNAKE_CASE :Optional[Any] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,embed_dim=37 ,has_text_modality=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__SCREAMING_SNAKE_CASE :Tuple = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__SCREAMING_SNAKE_CASE :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,nn.Linear ) )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__SCREAMING_SNAKE_CASE :Tuple = model_class(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE :List[str] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE :Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE :Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) )
__SCREAMING_SNAKE_CASE :Dict = outputs.hidden_states
__SCREAMING_SNAKE_CASE :Optional[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
# FocalNet has a different seq_length
__SCREAMING_SNAKE_CASE :Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__SCREAMING_SNAKE_CASE :Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
__SCREAMING_SNAKE_CASE :Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = reshaped_hidden_states[0].shape
__SCREAMING_SNAKE_CASE :List[Any] = (
reshaped_hidden_states[0].view(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE :Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__SCREAMING_SNAKE_CASE :Optional[int] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE :str = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE :Optional[int] = 3
__SCREAMING_SNAKE_CASE :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__SCREAMING_SNAKE_CASE :str = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__SCREAMING_SNAKE_CASE :Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__SCREAMING_SNAKE_CASE :List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__SCREAMING_SNAKE_CASE :int = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE :Tuple = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,(padded_height, padded_width) )
@slow
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :List[str] = FocalNetModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE :Union[str, Any] = _config_zero_init(SCREAMING_SNAKE_CASE__ )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = self.default_image_processor
__SCREAMING_SNAKE_CASE :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__SCREAMING_SNAKE_CASE :List[str] = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE :str = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
__SCREAMING_SNAKE_CASE :Optional[int] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,2_81 )
@require_torch
class _SCREAMING_SNAKE_CASE( A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (FocalNetBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : int = FocalNetConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
        __SCREAMING_SNAKE_CASE :int = FocalNetModelTester(self )
 | 239 |
"""simple docstring"""
def euclidean_distance_sqr(point_a, point_b):
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])
def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs; used as the base case below
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # inside the strip each point only needs to be checked against 6 neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)
def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
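# Hedged sanity check (my addition): the divide-and-conquer result should agree
# with an O(n^2) brute force over the same points, e.g.
#   closest_pair_of_points(pts, len(pts)) == _brute_force_closest_pair(pts)
def _brute_force_closest_pair(points):
    return min(
        euclidean_distance_sqr(p, q)
        for i, p in enumerate(points)
        for q in points[i + 1 :]
    ) ** 0.5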
if __name__ == "__main__":
    points = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
 | 239 | 1 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
@dataclass
class UpperCAmelCase :
__lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
__lowercase = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
__lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowercase = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def UpperCAmelCase_ ( self :List[str] )-> int:
A__ = self.task_name.lower()
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """train"""
__lowercase = """dev"""
__lowercase = """test"""
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = 42
__lowercase = 42
__lowercase = 42
def __init__( self :List[str] , lowercase_ :GlueDataTrainingArguments , lowercase_ :PreTrainedTokenizerBase , lowercase_ :Optional[int] = None , lowercase_ :Union[str, Split] = Split.train , lowercase_ :Optional[str] = None , )-> Tuple:
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , lowercase_ , )
A__ = args
A__ = glue_processors[args.task_name]()
A__ = glue_output_modes[args.task_name]
if isinstance(lowercase_ , lowercase_ ):
try:
A__ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
A__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" , )
A__ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A__, A__ = label_list[2], label_list[1]
A__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A__ = cached_features_file + ".lock"
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
A__ = time.time()
A__ = torch.load(lowercase_ )
logger.info(
F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
else:
logger.info(F"Creating features from dataset file at {args.data_dir}" )
if mode == Split.dev:
A__ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
A__ = self.processor.get_test_examples(args.data_dir )
else:
A__ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
A__ = examples[:limit_length]
A__ = glue_convert_examples_to_features(
lowercase_ , lowercase_ , max_length=args.max_seq_length , label_list=lowercase_ , output_mode=self.output_mode , )
A__ = time.time()
torch.save(self.features , lowercase_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self :Optional[int] )-> int:
return len(self.features )
def __getitem__( self :Any , lowercase_ :int )-> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self :int )-> List[Any]:
return self.label_list
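# Minimal standalone sketch (my addition, hypothetical path) of the lock-guarded
# caching pattern used in __init__ above: one process builds the cache while the
# others block on the lock, then everyone loads the same file.
import os
import torch
from filelock import FileLock

_cache_path = "/tmp/cached_features.pt"          # placeholder location
with FileLock(_cache_path + ".lock"):            # serializes across processes
    if os.path.exists(_cache_path):
        _features = torch.load(_cache_path)      # fast path: reuse the cache
    else:
        _features = list(range(10))              # stand-in for the expensive conversion
        torch.save(_features, _cache_path)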
| 237 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase : Optional[int] =16
__lowerCAmelCase : Tuple =32
def UpperCamelCase ( _lowerCamelCase : Accelerator , _lowerCamelCase : DatasetDict , _lowerCamelCase : List[int] , _lowerCamelCase : List[int] , _lowerCamelCase : int = 16 ):
A__ = AutoTokenizer.from_pretrained("bert-base-cased" )
A__ = DatasetDict(
{
"train": dataset["train"].select(_lowerCamelCase ),
"validation": dataset["train"].select(_lowerCamelCase ),
"test": dataset["validation"],
} )
def tokenize_function(_lowerCamelCase : Dict ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 16
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
_lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
A__ = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
A__ = DataLoader(
tokenized_datasets["test"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader, test_dataloader
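# Small sketch of the fold indices that feed get_fold_dataloaders above; the
# labels are toy values (the script itself uses datasets["train"]["label"]):
import numpy as np
from sklearn.model_selection import StratifiedKFold

_labels = np.array([0, 1] * 50)
_kfold = StratifiedKFold(n_splits=3)
for _train_idxs, _valid_idxs in _kfold.split(np.zeros(len(_labels)), _labels):
    # each validation fold keeps the 50/50 class balance of _labels
    assert len(_train_idxs) + len(_valid_idxs) == len(_labels)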
def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
# New Code #
A__ = []
# Download the dataset
A__ = load_dataset("glue" , "mrpc" )
# Create our splits
A__ = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config["lr"]
A__ = int(config["num_epochs"] )
A__ = int(config["seed"] )
A__ = int(config["batch_size"] )
A__ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
A__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ = batch_size // MAX_GPU_BATCH_SIZE
A__ = MAX_GPU_BATCH_SIZE
set_seed(_lowerCamelCase )
# New Code #
# Create our folds:
A__ = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] )
A__ = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_lowerCamelCase ):
A__, A__, A__ = get_fold_dataloaders(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=_lowerCamelCase )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__, A__, A__, A__, A__ = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Now we train the model
for epoch in range(_lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**_lowerCamelCase )
A__ = outputs.loss
A__ = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**_lowerCamelCase )
A__ = outputs.logits.argmax(dim=-1 )
A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , _lowerCamelCase )
# New Code #
# We also run predictions on the test set at the very end
A__ = []
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**_lowerCamelCase )
A__ = outputs.logits
A__, A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowerCamelCase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
A__ = torch.cat(_lowerCamelCase , dim=0 )
A__ = torch.stack(_lowerCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
A__ = metric.compute(predictions=_lowerCamelCase , references=_lowerCamelCase )
accelerator.print("Average test metrics from all folds:" , _lowerCamelCase )
def UpperCamelCase ( ):
A__ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
# New Code #
parser.add_argument("--num_folds" , type=_lowerCamelCase , default=3 , help="The number of splits to perform across the dataset" )
A__ = parser.parse_args()
A__ = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
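# Minimal sketch of the final "soft voting" above: stack the per-fold logits,
# average them, then take the argmax. The tensors are toy values, not model output.
import torch

_fold_logits = [
    torch.tensor([[2.0, 1.0], [0.0, 3.0]]),
    torch.tensor([[1.0, 2.0], [1.0, 2.0]]),
    torch.tensor([[3.0, 0.0], [0.5, 2.5]]),
]
_avg = torch.stack(_fold_logits, dim=0).sum(dim=0).div(len(_fold_logits))
assert _avg.argmax(dim=-1).tolist() == [0, 1]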
| 237 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
A_ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a (__magic_name__ , __magic_name__ ):
'''simple docstring'''
@register_to_config
def __init__( self , A__ , A__ = None , A__ = None ):
super().__init__()
A__ : int = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
A__ : List[str] = torch.zeros(A__ , A__ )
else:
A__ : str = None
A__ : Optional[int] = torch.nn.Parameter(A__ )
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: VQModel
UpperCAmelCase__: CLIPTextModel
UpperCAmelCase__: CLIPTokenizer
UpperCAmelCase__: TransformeraDModel
UpperCAmelCase__: LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__: VQDiffusionScheduler
def __init__( self , A__ , A__ , A__ , A__ , A__ , A__ , ):
super().__init__()
self.register_modules(
vqvae=A__ , transformer=A__ , text_encoder=A__ , tokenizer=A__ , scheduler=A__ , learned_classifier_free_sampling_embeddings=A__ , )
def __A ( self , A__ , A__ , A__ ):
A__ : Optional[int] = len(A__ ) if isinstance(A__ , A__ ) else 1
# get prompt text embeddings
A__ : int = self.tokenizer(
A__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
A__ : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A__ : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
A__ : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
A__ : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
A__ : Union[str, Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A__ )
# duplicate text embeddings for each generation per prompt
A__ : Union[str, Any] = prompt_embeds.repeat_interleave(A__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
A__ : Any = self.learned_classifier_free_sampling_embeddings.embeddings
A__ : int = negative_prompt_embeds.unsqueeze(0 ).repeat(A__ , 1 , 1 )
else:
A__ : Optional[Any] = [""""""] * batch_size
A__ : Optional[int] = text_input_ids.shape[-1]
A__ : int = self.tokenizer(
A__ , padding="""max_length""" , max_length=A__ , truncation=A__ , return_tensors="""pt""" , )
A__ : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
A__ : Dict = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A__ : Union[str, Any] = negative_prompt_embeds.shape[1]
A__ : Union[str, Any] = negative_prompt_embeds.repeat(1 , A__ , 1 )
A__ : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A__ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A__ , A__ = 100 , A__ = 5.0 , A__ = 1.0 , A__ = 1 , A__ = None , A__ = None , A__ = "pil" , A__ = True , A__ = None , A__ = 1 , ):
if isinstance(A__ , A__ ):
A__ : Any = 1
elif isinstance(A__ , A__ ):
A__ : Optional[Any] = len(A__ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(A__ )}""" )
A__ : Tuple = batch_size * num_images_per_prompt
A__ : List[str] = guidance_scale > 1.0
A__ : List[str] = self._encode_prompt(A__ , A__ , A__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A__ , A__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(A__ )}.""" )
# get the initial completely masked latents unless the user supplied it
A__ : List[str] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
A__ : int = self.transformer.num_vector_embeds - 1
A__ : Tuple = torch.full(A__ , A__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
A__ : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A__ , device=self.device )
A__ : Any = self.scheduler.timesteps.to(self.device )
A__ : Tuple = latents
for i, t in enumerate(self.progress_bar(A__ ) ):
# expand the sample if we are doing classifier free guidance
A__ : int = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
A__ : Optional[Any] = self.transformer(A__ , encoder_hidden_states=A__ , timestep=A__ ).sample
if do_classifier_free_guidance:
A__ , A__ : int = model_output.chunk(2 )
A__ : str = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A__ , dim=1 , keepdim=A__ )
A__ : Optional[Any] = self.truncate(A__ , A__ )
# remove `log(0)`'s (`-inf`s)
A__ : str = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
A__ : int = self.scheduler.step(A__ , timestep=A__ , sample=A__ , generator=A__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A__ , A__ , A__ )
A__ : Any = self.vqvae.config.vq_embed_dim
A__ : int = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
A__ : str = self.vqvae.quantize.get_codebook_entry(A__ , shape=A__ )
A__ : Tuple = self.vqvae.decode(A__ , force_not_quantize=A__ ).sample
A__ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
A__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A__ : int = self.numpy_to_pil(A__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A__ )
def __A ( self , A__ , A__ ):
A__ , A__ : Union[str, Any] = torch.sort(A__ , 1 , descending=A__ )
A__ : List[Any] = torch.exp(A__ )
A__ : Optional[int] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
A__ : List[str] = torch.full_like(keep_mask[:, 0:1, :] , A__ )
A__ : List[Any] = torch.cat((all_true, keep_mask) , dim=1 )
A__ : List[str] = keep_mask[:, :-1, :]
A__ : Dict = keep_mask.gather(1 , indices.argsort(1 ) )
A__ : Optional[Any] = log_p_x_0.clone()
A__ : int = -torch.inf # -inf = log(0)
return rv
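# Standalone sketch of the truncation step above: sort the token probabilities,
# keep the smallest prefix whose cumulative mass stays under truncation_rate
# (always keeping the most likely token), and send the rest to log(0) = -inf.
# Toy 1-D distribution; the pipeline applies the same idea per latent pixel.
import torch

_log_p = torch.tensor([[0.5, 0.3, 0.15, 0.05]]).log()
_rate = 0.9
_sorted_p, _idx = torch.sort(_log_p.exp(), dim=-1, descending=True)
_keep = _sorted_p.cumsum(dim=-1) < _rate                      # [True, True, False, False]
_keep = torch.cat([torch.ones_like(_keep[..., :1]), _keep[..., :-1]], dim=-1)
_keep = _keep.gather(-1, _idx.argsort(-1))                    # back to original order
_truncated = torch.where(_keep, _log_p, torch.full_like(_log_p, float("-inf")))
# keeps 0.5, 0.3 and 0.15; the 0.05 tail is dropped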
| 141 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Dict = '''falcon'''
UpperCAmelCase__: Any = ['''past_key_values''']
def __init__( self , A__=6_5024 , A__=4544 , A__=32 , A__=71 , A__=1e-5 , A__=0.0_2 , A__=True , A__=0.0 , A__=0.0 , A__=None , A__=False , A__=False , A__=True , A__=True , A__=False , A__=11 , A__=11 , **A__ , ):
A__ : Dict = vocab_size
# Backward compatibility with n_embed kwarg
A__ : Union[str, Any] = kwargs.pop("""n_embed""" , A__ )
A__ : Optional[Any] = hidden_size if n_embed is None else n_embed
A__ : Any = num_hidden_layers
A__ : Any = num_attention_heads
A__ : Optional[Any] = layer_norm_epsilon
A__ : Tuple = initializer_range
A__ : Tuple = use_cache
A__ : str = hidden_dropout
A__ : List[str] = attention_dropout
A__ : List[Any] = bos_token_id
A__ : Optional[Any] = eos_token_id
A__ : Optional[Any] = num_attention_heads if num_kv_heads is None else num_kv_heads
A__ : List[str] = alibi
A__ : Tuple = new_decoder_architecture
A__ : List[str] = multi_query # Ignored when new_decoder_architecture is True
A__ : List[Any] = parallel_attn
A__ : int = bias
super().__init__(bos_token_id=A__ , eos_token_id=A__ , **A__ )
@property
def __A ( self ):
return self.hidden_size // self.num_attention_heads
@property
def __A ( self ):
return not self.alibi
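# Quick numeric check of the two derived properties above (mirroring
# FalconConfig's head_dim and its "rotary unless ALiBi" rule); the values are
# the class defaults, computed by hand since the obfuscated names collide:
_hidden_size, _num_heads, _alibi = 4544, 71, False
assert _hidden_size // _num_heads == 64  # per-head dimension
assert not _alibi                        # so rotary position embeddings are used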
| 141 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a =logging.get_logger(__name__)
a ={
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[int] = '''detr'''
_UpperCAmelCase : Optional[Any] = ['''past_key_values''']
_UpperCAmelCase : Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[str]=3 ,SCREAMING_SNAKE_CASE__ : Any=1_0_0 ,SCREAMING_SNAKE_CASE__ : Dict=6 ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=8 ,SCREAMING_SNAKE_CASE__ : List[str]=6 ,SCREAMING_SNAKE_CASE__ : Dict=2_0_4_8 ,SCREAMING_SNAKE_CASE__ : List[str]=8 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 ,SCREAMING_SNAKE_CASE__ : Dict=0.0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : str="relu" ,SCREAMING_SNAKE_CASE__ : Tuple=2_5_6 ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 ,SCREAMING_SNAKE_CASE__ : List[str]=1.0 ,SCREAMING_SNAKE_CASE__ : List[str]=False ,SCREAMING_SNAKE_CASE__ : Optional[int]="sine" ,SCREAMING_SNAKE_CASE__ : List[Any]="resnet50" ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ,SCREAMING_SNAKE_CASE__ : List[Any]=1 ,SCREAMING_SNAKE_CASE__ : int=5 ,SCREAMING_SNAKE_CASE__ : str=2 ,SCREAMING_SNAKE_CASE__ : Tuple=1 ,SCREAMING_SNAKE_CASE__ : str=1 ,SCREAMING_SNAKE_CASE__ : List[str]=5 ,SCREAMING_SNAKE_CASE__ : str=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__lowerCamelCase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : List[Any] = backbone_config.get('model_type')
__lowerCamelCase : Optional[int] = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase : int = config_class.from_dict(SCREAMING_SNAKE_CASE__)
# set timm attributes to None
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = None, None, None
__lowerCamelCase : Dict = use_timm_backbone
__lowerCamelCase : str = backbone_config
__lowerCamelCase : Tuple = num_channels
__lowerCamelCase : Tuple = num_queries
__lowerCamelCase : Union[str, Any] = d_model
__lowerCamelCase : Union[str, Any] = encoder_ffn_dim
__lowerCamelCase : str = encoder_layers
__lowerCamelCase : Optional[int] = encoder_attention_heads
__lowerCamelCase : int = decoder_ffn_dim
__lowerCamelCase : Optional[Any] = decoder_layers
__lowerCamelCase : Any = decoder_attention_heads
__lowerCamelCase : List[str] = dropout
__lowerCamelCase : Union[str, Any] = attention_dropout
__lowerCamelCase : Optional[Any] = activation_dropout
__lowerCamelCase : List[Any] = activation_function
__lowerCamelCase : Union[str, Any] = init_std
__lowerCamelCase : Tuple = init_xavier_std
__lowerCamelCase : str = encoder_layerdrop
__lowerCamelCase : Optional[Any] = decoder_layerdrop
__lowerCamelCase : Dict = encoder_layers
__lowerCamelCase : Dict = auxiliary_loss
__lowerCamelCase : int = position_embedding_type
__lowerCamelCase : Optional[Any] = backbone
__lowerCamelCase : Union[str, Any] = use_pretrained_backbone
__lowerCamelCase : Union[str, Any] = dilation
# Hungarian matcher
__lowerCamelCase : List[str] = class_cost
__lowerCamelCase : Tuple = bbox_cost
__lowerCamelCase : Tuple = giou_cost
# Loss coefficients
__lowerCamelCase : List[Any] = mask_loss_coefficient
__lowerCamelCase : str = dice_loss_coefficient
__lowerCamelCase : int = bbox_loss_coefficient
__lowerCamelCase : List[Any] = giou_loss_coefficient
__lowerCamelCase : List[str] = eos_coefficient
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : str):
return self.encoder_attention_heads
@property
def lowerCAmelCase ( self : Union[str, Any]):
return self.d_model
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : PretrainedConfig ,**SCREAMING_SNAKE_CASE__ : List[Any]):
return cls(backbone_config=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : List[str] = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
__lowerCamelCase : int = self.backbone_config.to_dict()
__lowerCamelCase : Tuple = self.__class__.model_type
return output
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Any = version.parse('''1.11''' )
@property
def lowerCAmelCase ( self : List[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def lowerCAmelCase ( self : Tuple):
return 1E-5
@property
def lowerCAmelCase ( self : Optional[int]):
return 1_2
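# Hedged usage sketch, assuming the obfuscated classes above correspond to
# transformers' DetrConfig and its ONNX config (as the imports suggest);
# ResNetConfig is the CONFIG_MAPPING["resnet"] entry used in __init__:
from transformers import DetrConfig, ResNetConfig

_backbone = ResNetConfig(out_features=["stage4"])
_cfg = DetrConfig(use_timm_backbone=False, backbone_config=_backbone)
assert _cfg.hidden_size == _cfg.d_model == 256          # attribute_map alias
assert _cfg.to_dict()["backbone_config"]["model_type"] == "resnet"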
| 73 |
'''simple docstring'''
def _A ( a , b ):
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("""1""" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
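# Cross-checks of _A above against Python's built-in | operator (toy values):
assert _A(25, 32) == bin(25 | 32) == "0b111001"
assert _A(0, 1) == "0b1"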
| 164 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Any = "▁"
_lowerCamelCase : Any = {"vocab_file": "sentencepiece.bpe.model"}
_lowerCamelCase : Union[str, Any] = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
_lowerCamelCase : List[Any] = {
"facebook/xglm-564M": 2048,
}
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : Tuple="</s>" , UpperCamelCase__ : Optional[Any]="</s>" , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : Dict="<unk>" , UpperCamelCase__ : Union[str, Any]="<pad>" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
"""simple docstring"""
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase = 7
UpperCamelCase = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCamelCase = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase = len(self.sp_model )
UpperCamelCase = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCamelCase__ )
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ):
"""simple docstring"""
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCamelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ ))
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ ))
def A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def A ( self : int ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A ( self : Optional[int] , UpperCamelCase__ : str ):
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(UpperCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A ( self : str , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A ( self : Any , UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = ''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ' ' ).strip()
return out_string
def A ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , 'wb' ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
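# The fairseq/SentencePiece alignment described in the comments above, isolated
# into a tiny runnable sketch. A dict stands in for the SP model here (the real
# class calls `sp_model.PieceToId`); the offset of 1 is the gap between
# fairseq's 4 reserved ids and SP's 3 leading special pieces.
FAIRSEQ_OFFSET = 1
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAKE_SPM_IDS = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}

def token_to_id(token: str) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = FAKE_SPM_IDS.get(token, 0)
    # SentencePiece returns 0 for unknown pieces, so 0 must map to <unk>.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]

assert token_to_id(",") == 4  # spm position 3 + offset 1, matching the table above
assert token_to_id("<pad>") == 1
assert token_to_id("totally-unknown") == 3  # falls back to <unk>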
| 355 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (UnCLIPScheduler,)
def A ( self : Union[str, Any] , **UpperCamelCase__ : Any ):
"""simple docstring"""
UpperCamelCase = {
'num_train_timesteps': 1_0_0_0,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**UpperCamelCase__ )
return config
def A ( self : str ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def A ( self : List[str] ):
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(variance_type='fixed_small_log' )
UpperCamelCase = scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0E-1_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(variance_type='learned_range' )
UpperCamelCase = scheduler_class(**UpperCamelCase__ )
UpperCamelCase = 0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -1_0.1_7_1_2_7_9_0 < 1E-5
assert scheduler._get_variance(4_8_7 , predicted_variance=UpperCamelCase__ ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(9_9_9 , predicted_variance=UpperCamelCase__ ) - -0.0_0_1_0_0_1_1 < 1E-5
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**UpperCamelCase__ )
UpperCamelCase = scheduler.timesteps
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(UpperCamelCase__ ) )
UpperCamelCase = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(2_5 )
UpperCamelCase = scheduler.timesteps
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ )
if i + 1 == timesteps.shape[0]:
UpperCamelCase = None
else:
UpperCamelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(UpperCamelCase__ ) )
UpperCamelCase = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def A ( self : Tuple ):
"""simple docstring"""
pass
def A ( self : Optional[int] ):
"""simple docstring"""
pass
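# The full-loop tests above reduce to this pattern. A minimal sketch assuming
# `torch` and `diffusers` are installed; a random tensor stands in for the
# model's predicted noise residual, so the output itself is not meaningful.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8, generator=generator)
for t in scheduler.timesteps:
    residual = torch.randn(1, 3, 8, 8, generator=generator)  # 1. predict noise residual
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample  # 2. x_t -> x_{t-1}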
| 249 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_A : List[str] =get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class _lowercase ( lowercase_ , unittest.TestCase ):
a = DebertaVaTokenizer
a = DebertaVaTokenizerFast
a = True
a = True
def lowerCamelCase_ ( self: Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : List[Any] = DebertaVaTokenizer(UpperCamelCase__ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str ):
lowerCamelCase__ : str = """this is a test"""
lowerCamelCase__ : Tuple = """this is a test"""
return input_text, output_text
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : str = """<pad>"""
lowerCamelCase__ : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(UpperCamelCase__ ) , 30_001 )
def lowerCamelCase_ ( self: int ):
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def lowerCamelCase_ ( self: Optional[Any] ):
# fmt: off
lowerCamelCase__ : List[str] = """ \tHeLLo!how \n Are yoU? """
lowerCamelCase__ : List[str] = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
lowerCamelCase__ : Union[str, Any] = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
lowerCamelCase__ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Dict = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
lowerCamelCase__ : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCamelCase_ ( self: List[Any] ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCamelCase_ ( self: List[str] ):
pass
def lowerCamelCase_ ( self: Optional[int] ):
# fmt: off
lowerCamelCase__ : Optional[int] = """I was born in 92000, and this is falsé."""
lowerCamelCase__ : str = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
lowerCamelCase__ : Any = DebertaVaTokenizer(UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = DebertaVaTokenizerFast(UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Dict = """I was born in 92000, and this is falsé."""
lowerCamelCase__ : Union[str, Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
lowerCamelCase__ : str = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Any = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
# fmt: off
lowerCamelCase__ : Optional[Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase__ : Any = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
lowerCamelCase__ : List[str] = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
# fmt: off
lowerCamelCase__ : Optional[Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase__ : Dict = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
lowerCamelCase__ : Dict = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Any = """ \tHeLLo!how \n Are yoU? """
lowerCamelCase__ : List[str] = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
lowerCamelCase__ : Any = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
lowerCamelCase__ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : List[str] = self.get_rust_tokenizer()
lowerCamelCase__ : List[str] = """I was born in 92000, and this is falsé."""
lowerCamelCase__ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowerCamelCase__ : Dict = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase__ : str = tokenizer.encode(UpperCamelCase__ )
lowerCamelCase__ : int = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Any = """This is a test"""
lowerCamelCase__ : Optional[int] = [13, 1, 4_398, 25, 21, 1_289]
lowerCamelCase__ : Tuple = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
lowerCamelCase__ : Tuple = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
lowerCamelCase__ : Tuple = DebertaVaTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowerCamelCase__ : int = DebertaVaTokenizerFast(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowerCamelCase__ : Tuple = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Any = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# fmt: off
lowerCamelCase__ : Tuple = """I was born in 92000, and this is falsé."""
lowerCamelCase__ : List[str] = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
lowerCamelCase__ : Tuple = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
lowerCamelCase__ : Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
lowerCamelCase__ : List[str] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Tuple = DebertaVaTokenizer(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase__ : Tuple = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase__ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase__ , )
@slow
def lowerCamelCase_ ( self: int ):
# fmt: off
lowerCamelCase__ : Dict = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
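# The slow/fast parity checks above all follow one recipe; a sketch assuming a
# local SentencePiece model file (the path is a placeholder). Note that the
# class names here follow this snippet's obfuscation; in upstream transformers
# they are DebertaV2Tokenizer / DebertaV2TokenizerFast.
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast

slow_tok = DebertaVaTokenizer("fixtures/spiece.model")
fast_tok = DebertaVaTokenizerFast("fixtures/spiece.model")
text = "I was born in 92000, and this is falsé."
assert slow_tok.encode(text, add_special_tokens=False) == fast_tok.encode(text, add_special_tokens=False)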
| 41 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = """cvt"""
def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = patch_stride
lowercase__ = patch_padding
lowercase__ = embed_dim
lowercase__ = num_heads
lowercase__ = depth
lowercase__ = mlp_ratio
lowercase__ = attention_drop_rate
lowercase__ = drop_rate
lowercase__ = drop_path_rate
lowercase__ = qkv_bias
lowercase__ = cls_token
lowercase__ = qkv_projection_method
lowercase__ = kernel_qkv
lowercase__ = padding_kv
lowercase__ = stride_kv
lowercase__ = padding_q
lowercase__ = stride_q
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
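# The constructor above is the standard PretrainedConfig recipe: forward
# **kwargs to super().__init__, then record every architecture argument on
# self. A minimal runnable sketch with made-up fields:
from transformers import PretrainedConfig

class TinyCvtLikeConfig(PretrainedConfig):
    model_type = "tiny-cvt-like"

    def __init__(self, num_channels=3, embed_dim=(64, 192, 384), layer_norm_eps=1e-12, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.embed_dim = list(embed_dim)
        self.layer_norm_eps = layer_norm_eps

config = TinyCvtLikeConfig(num_channels=1)
assert config.to_dict()["num_channels"] == 1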
| 2 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : int=3 , __lowerCamelCase : List[Any]=30 , __lowerCamelCase : Optional[Any]=4_00 , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=[0.5, 0.5, 0.5] , __lowerCamelCase : str=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=1 / 2_55 , __lowerCamelCase : Any=True , ) -> int:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A : Optional[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
A : Dict = parent
A : Optional[int] = batch_size
A : Any = num_channels
A : List[Any] = min_resolution
A : Optional[Any] = max_resolution
A : List[Any] = do_resize
A : Tuple = size
A : str = do_normalize
A : Tuple = image_mean
A : Tuple = image_std
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : List[Any] = do_pad
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any]=False ) -> List[Any]:
if not batched:
A : Union[str, Any] = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
A , A : List[str] = image.size
else:
A , A : str = image.shape[1], image.shape[2]
if w < h:
A : Union[str, Any] = int(self.size["shortest_edge"] * h / w )
A : str = self.size["shortest_edge"]
elif w > h:
A : Union[str, Any] = self.size["shortest_edge"]
A : List[Any] = int(self.size["shortest_edge"] * w / h )
else:
A : Optional[Any] = self.size["shortest_edge"]
A : Optional[Any] = self.size["shortest_edge"]
else:
A : Tuple = []
for image in image_inputs:
A , A : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : str = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
A : Tuple = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = DetaImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] = DetaImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
A : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
pass
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
# Initialize image_processing
A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
A : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A , A : Tuple = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
A : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]:
# Initialize image_processing
A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
A : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A , A : Dict = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Any = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
A , A : Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
# Initialize image_processing
A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
A : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A , A : List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : List[str] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
A , A : Optional[int] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
# prepare image and target
A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
A : Optional[int] = json.loads(f.read() )
A : Optional[Any] = {"image_id": 3_97_69, "annotations": target}
# encode them
A : Any = DetaImageProcessor()
A : Dict = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
A : Any = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
A : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
A : Optional[Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
A : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
A : List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
A : Optional[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
A : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
A : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
A : Union[str, Any] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
A : Optional[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
# prepare image, target and masks_path
A : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
A : Any = json.loads(f.read() )
A : Union[str, Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
A : Union[str, Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
A : Union[str, Any] = DetaImageProcessor(format="coco_panoptic" )
A : Dict = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
A : Tuple = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
A : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
A : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
A : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
A : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
A : Tuple = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
A : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
A : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
A : List[Any] = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
A : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
A : Union[str, Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) ) | 256 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Any = set()
A : int = []
def parse_line(_lowerCamelCase ):
for line in fp:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A : Any = line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(_lowerCamelCase ) > 0:
A : Union[str, Any] = "\n".join(_lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(f""": {x}: """ in warning for x in targets ):
selected_warnings.add(_lowerCamelCase )
buffer.clear()
continue
else:
A : Union[str, Any] = line.strip()
buffer.append(_lowerCamelCase )
if from_gh:
for filename in os.listdir(_lowerCamelCase ):
A : Tuple = os.path.join(_lowerCamelCase , _lowerCamelCase )
if not os.path.isdir(_lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(_lowerCamelCase ) as fp:
parse_line(_lowerCamelCase )
else:
try:
with zipfile.ZipFile(_lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_lowerCamelCase ) as fp:
parse_line(_lowerCamelCase )
except Exception:
logger.warning(
f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Tuple = set()
A : Union[str, Any] = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for p in os.listdir(_lowerCamelCase ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_lowerCamelCase , _lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def UpperCAmelCase ( _lowerCamelCase ):
return values.split("," )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__SCREAMING_SNAKE_CASE = extract_warnings(args.output_dir, args.targets)
__SCREAMING_SNAKE_CASE = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 256 | 1 |
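# The stateful parse loop above, reduced to its essence: indented lines extend
# the current warning block, a non-indented line flushes it, and a block is
# kept only if it names one of the target warning categories.
def extract_warnings_from_text(text: str, targets: list[str]) -> set[str]:
    selected: set[str] = set()
    buffer: list[str] = []

    def flush() -> None:
        if buffer:
            warning = "\n".join(buffer)
            if any(f": {t}: " in warning for t in targets):
                selected.add(warning)
            buffer.clear()

    for line in text.splitlines():
        if not line.startswith(" "):
            flush()  # a non-indented line ends the current block
        else:
            buffer.append(line.strip())
    flush()
    return selected

demo = "header\n  a.py:1: DeprecationWarning: old\n  details\nfooter\n"
assert extract_warnings_from_text(demo, ["DeprecationWarning"]) == {
    "a.py:1: DeprecationWarning: old\ndetails"
}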
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
@require_cuda
def lowerCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
# If no defaults are changed, `to_kwargs` returns an empty dict.
snake_case_ = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
AcceleratorState._reset_state()
snake_case_ = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
snake_case_ = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_0_0_0 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def lowerCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
snake_case_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Optional[int] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
SCREAMING_SNAKE_CASE :List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
SCREAMING_SNAKE_CASE :List[Any] = torch.nn.Linear(1_00, 2_00)
SCREAMING_SNAKE_CASE :List[str] = accelerator.prepare(model)
# Check the values changed in kwargs
SCREAMING_SNAKE_CASE :List[str] = ''''''
SCREAMING_SNAKE_CASE :int = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
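# The `to_kwargs` behaviour exercised above (emit only the fields that differ
# from the dataclass defaults), reproduced as a standalone sketch of
# accelerate's KwargsHandler idea:
from dataclasses import dataclass, fields

@dataclass
class MockKwargs:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self) -> dict:
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != f.default
        }

assert MockKwargs().to_kwargs() == {}
assert MockKwargs(a=2, b=True).to_kwargs() == {"a": 2, "b": True}
assert MockKwargs(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}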
| 159 |
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(snake_case__ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
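# A cleaned-up, doctested version of the helper above: one copy of the input
# per alphabetic position, with that position uppercased.
def capitalize_variants(txt: str) -> list[str]:
    """
    >>> capitalize_variants("a1b")
    ['A1b', 'a1B']
    """
    return [
        txt[:i] + txt[i].upper() + txt[i + 1 :]
        for i in range(len(txt))
        if txt[i].isalpha()
    ]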
| 298 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Dict = '▁'
_lowerCamelCase : int = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
_lowerCamelCase : List[Any] = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
_lowerCamelCase : Any = {'vinai/bartpho-syllable': 1024}
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__(self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int="<s>" , _lowerCAmelCase : List[str]="</s>" , _lowerCAmelCase : Tuple="</s>" , _lowerCAmelCase : Any="<s>" , _lowerCAmelCase : Optional[int]="<unk>" , _lowerCAmelCase : List[str]="<pad>" , _lowerCAmelCase : List[Any]="<mask>" , _lowerCAmelCase : int = None , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
A = vocab_file
A = monolingual_vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
A = {}
A = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
A = cnt
cnt += 1
with open(__lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
A = line.strip().split()[0]
A = len(self.fairseq_tokens_to_ids )
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
A = len(self.fairseq_tokens_to_ids )
A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Dict ):
A = self.__dict__.copy()
A = None
A = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : List[str] , _lowerCAmelCase : Optional[int] ):
A = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A (self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A (self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple = None , _lowerCAmelCase : Any = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def A (self : Any , _lowerCAmelCase : str , _lowerCAmelCase : int = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A (self : str ):
return len(self.fairseq_ids_to_tokens )
def A (self : List[str] ):
A = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A (self : Dict , _lowerCAmelCase : int ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def A (self : int , _lowerCAmelCase : List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] ):
return self.fairseq_ids_to_tokens[index]
def A (self : int , _lowerCAmelCase : Optional[int] ):
A = """""".join(__lowerCamelCase ).replace(__lowerCamelCase , """ """ ).strip()
return out_string
def A (self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , """wb""" ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(__lowerCamelCase )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
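# Both SentencePiece tokenizers in this document pickle themselves the same
# way: drop the unpicklable C++ processor in __getstate__, keep the serialized
# model proto, and rebuild it in __setstate__. A standalone sketch (requires
# the `sentencepiece` package and a real model file; the path below is a
# placeholder):
import pickle

import sentencepiece as spm

class PicklableSentencePiece:
    def __init__(self, model_path: str):
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(model_path)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # the C++ object cannot be pickled
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

# tok = PicklableSentencePiece("sentencepiece.bpe.model")  # placeholder path
# restored = pickle.loads(pickle.dumps(tok))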
| 367 |
'''simple docstring'''
import math
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
A = n
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # adjacency matrix for weight
A = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def A (self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
A = w
def A (self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def A (self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
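# The same algorithm without the class scaffolding: O(n^3) all-pairs shortest
# paths over an adjacency matrix, checked against the graph built above.
import math

def floyd_warshall(weights: list[list[float]]) -> list[list[float]]:
    n = len(weights)
    dist = [row[:] for row in weights]  # dist[i][j] = best known i -> j
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

INF = math.inf
w = [
    [0, INF, 9, INF, 10],
    [INF, 0, INF, 5, INF],
    [INF, INF, 0, 7, INF],
    [10, 2, 1, 0, 6],
    [INF, 3, 4, 9, 0],
]
d = floyd_warshall(w)
assert d[1][4] == 11  # 1 -> 3 -> 4
assert d[0][3] == 16  # 0 -> 2 -> 3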
| 337 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def __lowercase ( ):
snake_case_ : Union[str, Any] = {}
snake_case_ : int = 2
while True:
snake_case_ : Any = factor_map.pop(_lowerCAmelCase , _lowerCAmelCase )
if factor:
snake_case_ : Dict = factor + prime
while x in factor_map:
x += factor
snake_case_ : int = factor
else:
snake_case_ : List[Any] = prime
yield prime
prime += 1
def __lowercase ( _a = 1E10 ):
snake_case_ : str = sieve()
snake_case_ : Optional[int] = 1
while True:
snake_case_ : str = next(_lowerCAmelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(_lowerCAmelCase )
n += 2
if __name__ == "__main__":
print(solution())
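# A readable version of the incremental sieve generator used by the solution
# above: each discovered prime p is stored under its next composite multiple
# (starting at p*p); popping a stored key proves the current number composite.
from collections.abc import Generator

def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:
            nxt = candidate + factor
            while nxt in factor_map:  # find a free slot for this factor
                nxt += factor
            factor_map[nxt] = factor
        else:
            factor_map[candidate * candidate] = candidate  # candidate is prime
            yield candidate
        candidate += 1

gen = sieve()
assert [next(gen) for _ in range(5)] == [2, 3, 5, 7, 11]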
| 264 |
import math
class _UpperCamelCase :
def __init__( self :Union[str, Any] , lowerCamelCase :Union[str, Any]=0 ) -> Tuple: # a graph with Node 0,1,...,N-1
UpperCAmelCase__ = n
UpperCAmelCase__ = [
[math.inf for j in range(0 , lowerCamelCase )] for i in range(0 , lowerCamelCase )
] # adjacency matrix for weight
UpperCAmelCase__ = [
[math.inf for j in range(0 , lowerCamelCase )] for i in range(0 , lowerCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase_ ( self :Tuple , lowerCamelCase :List[Any] , lowerCamelCase :Optional[Any] , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = w
def UpperCAmelCase_ ( self :Optional[int] ) -> Optional[Any]:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
UpperCAmelCase__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase_ ( self :int , lowerCamelCase :List[Any] , lowerCamelCase :Dict ) -> List[str]:
return self.dp[u][v]
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 169 | 0 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return [ord(SCREAMING_SNAKE_CASE ) - 96 for elem in plain]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return "".join(chr(elem + 96 ) for elem in encoded )
def _SCREAMING_SNAKE_CASE ( ):
A_ : Optional[Any] = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , SCREAMING_SNAKE_CASE )
print('''Decoded:''' , decode(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
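# The a1z26 cipher above with doctests (lowercase letters map to 1..26):
def encode_a1z26(plain: str) -> list[int]:
    """
    >>> encode_a1z26("abc")
    [1, 2, 3]
    """
    return [ord(ch) - 96 for ch in plain]

def decode_a1z26(encoded: list[int]) -> str:
    """
    >>> decode_a1z26([1, 2, 3])
    'abc'
    """
    return "".join(chr(n + 96) for n in encoded)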
| 65 |
from bisect import bisect
from itertools import accumulate
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : List[Any] = sorted(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , key=lambda SCREAMING_SNAKE_CASE : x[0] / x[1] , reverse=SCREAMING_SNAKE_CASE )
A_ , A_ : str = [i[0] for i in r], [i[1] for i in r]
A_ : Tuple = list(accumulate(SCREAMING_SNAKE_CASE ) )
A_ : Optional[int] = bisect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
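# The bisect/accumulate one-liner above, unpacked into the textbook greedy:
# sort by value density, take whole items while they fit, then a fraction of
# the first item that does not.
def fractional_knapsack(values: list[float], weights: list[float], capacity: float) -> float:
    items = sorted(zip(values, weights), key=lambda vw: vw[0] / vw[1], reverse=True)
    total = 0.0
    for value, weight in items:
        if capacity >= weight:
            total += value
            capacity -= weight
        else:
            total += value * capacity / weight  # take the divisible remainder
            break
    return total

assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == 240.0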
| 65 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
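# Hedged usage sketch (added; the values are illustrative, not defaults):
#   config = InformerConfig(prediction_length=24, num_time_features=1)
#   config.context_length  # -> 24, falling back to prediction_length when unset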
| 335 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
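# Behavior note (added): nothing from `modeling_pegasus_x` is imported at module
# load time; the first attribute access (for example `PegasusXModel`) makes
# `_LazyModule` import the submodule, so importing the package stays cheap.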
| 335 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() | 350 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
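# Reference variant (added, hedged): the textbook Fisher-Yates walk, which
# guarantees a uniformly random permutation, unlike the random-pair swaps above.
def fisher_yates_shuffle_classic(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data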
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 210 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase_ = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = lowerCamelCase_

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 55 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
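# Example invocations dispatched through this entry point (added, illustrative):
#   accelerate config      # interactive configuration
#   accelerate env         # print environment info
#   accelerate launch train.py --num_processes 2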
| 55 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
    import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        # Greedy longest-match-first WordPiece segmentation of a single token.
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation followed by WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping negative, pad, bos and eos ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
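# Hedged usage sketch (added; checkpoint id is the one in the vocab map above,
# the output shown is only a description of the pipeline, not a verified value):
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   tokenizer.tokenize("今天天气真好!")  # jieba segmentation, then WordPiece per segment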
| 87 | 1 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count n-digit positive integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
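# Worked example (added note): 16807 = 7**5 is itself a 5-digit number, so it is
# counted; with the defaults the sum should come to 49, the published answer to
# Project Euler problem 63.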
| 93 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
    print(f"{solution() = }")
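# Context (added note): this is Project Euler problem 75. Euclid's formula gives
# primitive triples (m^2 - n^2, 2mn, m^2 + n^2) with perimeter 2m(m + n); the
# code counts perimeters up to the limit reachable by exactly one triple. The
# commonly cited answer for 1,500,000 is 161667.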
| 42 | 0 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of the size of each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
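# Minimal usage sketch (added; values are illustrative):
#   ds = DisjointSet([1, 1, 1])
#   ds.merge(1, 2)                         # -> True, the two singletons join
#   ds.get_parent(1) == ds.get_parent(2)   # -> True
#   ds.max_set                             # -> 2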
| 193 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetEncoder):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
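# Hedged usage sketch (added; the checkpoint is the one used in the docstrings
# above, and `image` stands for any PIL image):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   logits = model(**processor(images=image, return_tensors="pt")).logits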
| 193 | 1 |
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
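# Expected output of main() for the words above (added note; each suggestion
# keeps a trailing space because the END marker is rendered as " "):
#   ('depart ', 'detergent ', 'deer ', 'deal ')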
| 236 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 236 | 1 |
'''simple docstring'''
def bfs(graph, s, t, parent):
    """Return True if there is an augmenting path from source s to sink t."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink; mutates `graph` in place."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
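# Expected output for this classic example network (added note): 23. Note that
# ford_fulkerson mutates `graph` in place, so rerunning it on the same matrix
# would return 0.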
| 337 |
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
| 337 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original BLIP weights into the transformers design."""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
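# Illustrative invocation (added; the script filename is an assumption):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base --config_path config.json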
| 159 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Build (old_key, new_key) pairs mapping original checkpoint keys to HF names."""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
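
# A minimal sketch of how the (old_key, new_key) pairs above are consumed,
# not part of the original script. The two sample keys are assumptions chosen
# to illustrate the "conv_1." and "classifier.1." rules.
def _demo_apply_rename_keys():
    from collections import OrderedDict

    state = OrderedDict({"conv_1.weight": 1, "classifier.1.bias": 2})
    pairs = [
        ("conv_1.weight", "mobilevitv2.conv_stem.weight"),
        ("classifier.1.bias", "classifier.bias"),
    ]
    for old, new in pairs:
        state[new] = state.pop(old)  # the same move rename_key() performs
    assert "mobilevitv2.conv_stem.weight" in state
    return state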
def remove_unused_keys(state_dict):
    """Remove keys of the auxiliary segmentation head, which has no HF counterpart."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO test image of two cats used for output checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into the HF MobileViTV2 structure."""
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the loaded original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.idalabel[predicted_class_idx])
    if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
        # expected_logits for the base variant
        expected_logits = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01])
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task on which the MobileViTV2 model you'd like to convert was trained. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
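
# A hedged usage note, not part of the original file: a typical invocation of
# this conversion script. The script filename and the local paths below are
# assumptions chosen for illustration.
#
#   python convert_mobilevitv2_original_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k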
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed=None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the shared key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
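
# A short classical check, not part of the original module, of why bbaa()
# prepares 6 * key_len qubits: Alice and Bob pick bases independently, so on
# average only half the positions survive sifting, and 6x oversampling makes
# a too-short key (which would need "0"-padding) very unlikely.
def _demo_sifting_shortfall_rate(key_len: int = 8, trials: int = 1_000) -> float:
    rng = np.random.default_rng(seed=0)
    short_runs = 0
    for _ in range(trials):
        alice = rng.integers(2, size=6 * key_len)
        bob = rng.integers(2, size=6 * key_len)
        if int((alice == bob).sum()) < key_len:  # fewer matching bases than key bits
            short_runs += 1
    return short_runs / trials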
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    """Map a single original Jukebox checkpoint key to its HF equivalent."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
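
# A minimal check of replace_key() on a few representative keys, not part of
# the original script. The sample keys are assumptions chosen to hit the
# "prior.x_out", "y_emb." and "_ln" branches.
def _demo_replace_key():
    assert replace_key("prior.x_out.weight") == "prior.fc_proj_out.weight"
    assert replace_key("cond.y_emb.weight") == "cond.metadata_embedding.weight"
    assert replace_key("transformer._ln.bias") == "transformer._layer_norm.bias"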
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key in `state_dict` to the HF layout, recording the mapping."""
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
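
# A small illustration, not part of the original script, of the block-index
# arithmetic used above: each original "model.{m}.{d}" pair is flattened into
# one sequential index (m * 2 + d), and the decoder/prior side subtracts 2 to
# skip its two leading non-block layers. The groups tuple is an assumption.
def _demo_block_index():
    groups = ("0", "1", "3", "1", "weight")  # as captured by re_encoder_block_conv_in
    encoder_index = int(groups[2]) * 2 + int(groups[3])      # 3 * 2 + 1 == 7
    decoder_index = int(groups[2]) * 2 + int(groups[3]) - 2  # 7 - 2 == 5
    return encoder_index, decoder_index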
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Copy/paste/tweak the original OpenAI weights into the HF Jukebox structure."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
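
# A hedged usage note, not part of the original file: converting the smaller
# 1b-lyrics variant instead of the default. The script filename and output
# directory are assumptions chosen for illustration.
#
#   python convert_jukebox.py \
#       --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted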
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="""image classification"""
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="""vision-to-text modeling""")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
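
# A short usage sketch, not part of the original module: the lazy mappings
# above let the auto classes resolve a config type to its Flax model class.
# Loading "bert-base-cased" requires network access, so treat this as a
# sketch rather than a test.
def _demo_flax_auto_model():
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    # The mapping resolved BertConfig -> FlaxBertModel behind the scenes.
    return type(model).__name__  # "FlaxBertModel"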
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
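
# A brief usage note, not part of the original file: these submodules are the
# per-architecture packages reached through `transformers.models`, e.g.:
#
#   from transformers.models import bert
#   from transformers.models.bert import configuration_bert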
'''Pigeonhole sort: an integer sort that is linear in n + value range.'''


def pigeonhole_sort(a) -> None:
    """Sort the list of integers `a` in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(map(str, a)))


if __name__ == "__main__":
    main()
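
# An illustrative alternative, not part of the original module: the same idea
# with collections.Counter, returning a new list instead of sorting in place.
def pigeonhole_sorted(values):
    from collections import Counter

    counts = Counter(values)
    lo, hi = min(values), max(values)
    out = []
    for v in range(lo, hi + 1):
        out.extend([v] * counts[v])  # emit each value once per occurrence
    return out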
'''Slow integration test for the TensorFlow CamemBERT model.'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""")

        input_ids = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 10, 7_68))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))