import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()

    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
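
# --- added illustration (not part of the original module) ----------------------
# The acceptance rule above is the Metropolis criterion: a worse neighbor
# (change < 0 after the find_max sign flip) is still accepted with probability
# e^(change / current_temp), so bad moves become rarer as the temperature decays.
def _metropolis_acceptance_demo() -> None:
    for temp in (100.0, 10.0, 1.0):
        probability = math.e ** (-5.0 / temp)  # a move that worsens the score by 5
        print(f"temp={temp:6.1f}  accept probability={probability:.4f}")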
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
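
# --- added illustration (not part of the original module) ----------------------
# Hypothetical usage sketch: the file names follow VOCAB_FILES_NAMES above, but
# the paths are placeholders and the snippet assumes the files exist locally.
#
#   tokenizer = ErnieMTokenizer(
#       sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt"
#   )
#   ids = tokenizer("ErnieM handles many languages.")["input_ids"]
#   print(tokenizer.convert_ids_to_string(ids))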
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
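
# --- added illustration (not part of the original module) ----------------------
# A minimal sketch of the defaulting logic above: when attention_hidden_size and
# intermediate_size are left unset, they fall back to hidden_size and
# 4 * hidden_size respectively.
#
#   config = RwkvConfig(hidden_size=512, num_hidden_layers=12)
#   assert config.attention_hidden_size == 512
#   assert config.intermediate_size == 2048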
def twos_complement(number: int) -> str:
    """
    Take in a negative integer 'number'.
    Return the two's complement representation of 'number'.
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
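
# --- added illustration (not part of the original module) ----------------------
# Worked examples of the function above (values checked by hand):
#   twos_complement(-1) == "0b11"    # len("1") = 1 bit; 1 - 2 = -1 -> "1"; "1" + "" + "1"
#   twos_complement(-5) == "0b1011"  # len("101") = 3 bits; 5 - 8 = -3 -> "11"; "1" + "0" + "11"
#   twos_complement(0)  == "0b0"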
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
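
# --- added note (not part of the original test file) ----------------------------
# The fast tests above build tiny dummy components and run on CPU; only the class
# marked @slow/@require_torch_gpu downloads the real "timbrooks/instruct-pix2pix"
# weights. A typical (assumed) pytest invocation:
#
#   python -m pytest -k "InstructPix2Pix" -q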
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
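
# --- added illustration (not part of the original test file) --------------------
# The constraint behaves like a trie over the allowed token sequences: after
# [1, 2], either the [.., 3] or the [.., 4] branch can still complete it.
#
#   dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   for token in (1, 2, 4):
#       dc.update(token)
#   assert dc.completed  # [1, 2, 4] is one of the allowed sequences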
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
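
# --- added note (not part of the original module) -------------------------------
# With the _LazyModule indirection above, importing the package stays cheap:
# symbols are resolved on first attribute access, so the torch-heavy
# modeling_unispeech module is only imported when a modeling class is touched.
#
#   from transformers import UniSpeechConfig  # fast: no modeling import yet
#   config = UniSpeechConfig()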
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
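
# --- added note (not part of the original script) --------------------------------
# Shape check for the windowing above: with look_back=10 and forward_days=5, each
# sample is a (10, 1) window of scaled prices and each target holds the next 5
# values, i.e. x_train has shape (n, 10, 1) and y_train has shape (n, 5).
#
#   assert x_train.shape[1:] == (look_back, 1)
#   assert y_train.shape[1] == forward_days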
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
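
# --- added illustration (not part of the original pipeline) ----------------------
# Hypothetical usage sketch, assuming this file is used as a diffusers community
# pipeline (the custom_pipeline name is an assumption):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#   images_v1_1_to_v1_4 = output.images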
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
snake_case_ : Union[str, Any] = data_utils.TransfoXLTokenizer
snake_case_ : List[Any] = data_utils.TransfoXLCorpus
snake_case_ : Union[str, Any] = data_utils
snake_case_ : Any = data_utils
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(SCREAMING_SNAKE_CASE__, '''rb''' ) as fp:
UpperCAmelCase_ : Dict = pickle.load(SCREAMING_SNAKE_CASE__, encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
UpperCAmelCase_ : Optional[Any] = corpus.vocab.__dict__
torch.save(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''', SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCAmelCase_ : Optional[int] = os.path.abspath(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = os.path.abspath(SCREAMING_SNAKE_CASE__ )
print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCAmelCase_ : Any = TransfoXLConfig()
else:
UpperCAmelCase_ : Tuple = TransfoXLConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(F"""Building PyTorch model from configuration: {config}""" )
UpperCAmelCase_ : Union[str, Any] = TransfoXLLMHeadModel(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = load_tf_weights_in_transfo_xl(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
UpperCAmelCase_ : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
print(F"""Save PyTorch model to {os.path.abspath(SCREAMING_SNAKE_CASE__ )}""" )
torch.save(model.state_dict(), SCREAMING_SNAKE_CASE__ )
print(F"""Save configuration file to {os.path.abspath(SCREAMING_SNAKE_CASE__ )}""" )
with open(SCREAMING_SNAKE_CASE__, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
snake_case_ : List[Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
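# Example invocation (a minimal sketch; the script name and all paths below are
# hypothetical, and only --pytorch_dump_folder_path is required):
#   python convert_transfo_xl_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./transfo-xl-wt103/model.ckpt \
#       --transfo_xl_config_file ./transfo-xl-wt103/config.json \
#       --pytorch_dump_folder_path ./pytorch_dump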
| 644
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
snake_case_ : Optional[int] = 16
snake_case_ : Tuple = 32
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Tuple = datasets.map(
SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE__ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : str = DataLoader(
tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
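# A minimal usage sketch of the helper above (illustrative only):
#   accelerator = Accelerator()
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)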
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any:
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
# Initialize accelerator
UpperCAmelCase_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : int = config['''lr''']
UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] )
UpperCAmelCase_ : Optional[int] = int(config['''seed'''] )
UpperCAmelCase_ : List[str] = int(config['''batch_size'''] )
UpperCAmelCase_ : Optional[int] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also controls the new weight initialization)
UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ )
# Instantiate optimizer
UpperCAmelCase_ : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, )
else:
UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Union[str, Any] = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' )
UpperCAmelCase_ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCAmelCase_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1
UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizer\'s lr:''', optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : int = {}
for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = outputs.loss
UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : Tuple = F"""epoch_{epoch}"""
UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = accuracy
UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr''']
UpperCAmelCase_ : Tuple = epoch
UpperCAmelCase_ : Dict = overall_step
accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( ) -> List[str]:
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of a training script that saves and resumes training state from checkpoints.''' )
parser.add_argument(
'''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, )
parser.add_argument(
'''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', )
parser.add_argument(
'''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', )
parser.add_argument(
'''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', )
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
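# Example launch (a minimal sketch; the DeepSpeed accelerate config file and the
# script name are hypothetical):
#   accelerate launch --config_file ds_zero2.yaml test_checkpointing.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./checkpoints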
| 644
| 1
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __a (lowerCamelCase ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : List[str]=7_68 ) -> Tuple:
"""simple docstring"""
super().__init__(__magic_name__ )
UpperCAmelCase_ : str = proj_size
UpperCAmelCase_ : Dict = CLIPVisionModel(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = PaintByExampleMapper(__magic_name__ )
UpperCAmelCase_ : Tuple = nn.LayerNorm(config.hidden_size )
UpperCAmelCase_ : Dict = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
UpperCAmelCase_ : Any = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Tuple=False ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model(pixel_values=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = clip_output.pooler_output
UpperCAmelCase_ : Any = self.mapper(latent_states[:, None] )
UpperCAmelCase_ : Dict = self.final_layer_norm(__magic_name__ )
UpperCAmelCase_ : str = self.proj_out(__magic_name__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class __a (nn.Module ):
def __init__( self : Union[str, Any] , __magic_name__ : Any ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : int = (config.num_hidden_layers + 1) // 5
UpperCAmelCase_ : Dict = config.hidden_size
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(__magic_name__ , __magic_name__ , __magic_name__ , activation_fn='''gelu''' , attention_bias=__magic_name__ )
for _ in range(__magic_name__ )
] )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
for block in self.blocks:
UpperCAmelCase_ : Any = block(__magic_name__ )
return hidden_states
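# Minimal usage sketch for the image encoder above (a sketch only; the class name
# `PaintByExampleImageEncoder` follows the original diffusers source, and
# `clip_config` / `pixel_values` are illustrative):
#   encoder = PaintByExampleImageEncoder(clip_config, proj_size=7_68)
#   latents, uncond = encoder(pixel_values, return_uncond_vector=True)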
| 644
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]:
UpperCAmelCase_ : int = []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [nums.copy()]
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : List[Any] = nums.pop(0 )
UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ )
for perm in permutations:
perm.append(SCREAMING_SNAKE_CASE__ )
result.extend(SCREAMING_SNAKE_CASE__ )
nums.append(SCREAMING_SNAKE_CASE__ )
return result
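# Quick sanity check of the rotation-based version above (worked by hand):
#   permute([1, 2]) -> [[2, 1], [1, 2]]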
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
output.append(nums[:] )
else:
for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start]
backtrack(start + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack
UpperCAmelCase_ : Optional[int] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the permutations produced by the backtracking (permute2) version
snake_case_ : Tuple = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 644
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
UpperCAmelCase_ : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ : int = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ : Tuple = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(__magic_name__ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __magic_name__ , atol=1E-3 ) )
@slow
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
UpperCAmelCase_ : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ : Tuple = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ : Tuple = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ : int = model(__magic_name__ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __magic_name__ , atol=1E-3 ) )
| 644
|
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
right -= 1 # Because right is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
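# Minimal usage sketch for the max-Fenwick tree above (a sketch; `MaxFenwickTree`,
# `update`, and `query` follow the original implementation's names):
#   tree = MaxFenwickTree(5)
#   tree.update(2, 10)  # point assignment arr[2] = 10
#   tree.query(0, 5)    # max over the half-open range [0, 5) -> 10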
| 644
| 1
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __a :
def __init__( self : int ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ''''''
UpperCAmelCase_ : Tuple = ''''''
UpperCAmelCase_ : int = []
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : str = 2_56
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Tuple = 0
def UpperCAmelCase__ ( self : Any , __magic_name__ : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : int = cva.imread(__magic_name__ , 0 )
UpperCAmelCase_ : str = copy.deepcopy(self.img )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='''x''' )
UpperCAmelCase_ : Union[str, Any] = np.sum(__magic_name__ )
for i in range(len(__magic_name__ ) ):
UpperCAmelCase_ : Dict = x[i] / self.k
self.sk += prk
UpperCAmelCase_ : List[Any] = (self.L - 1) * self.sk
UpperCAmelCase_ : str = last % 1 # fractional part of last, used for the round-half-up on the next line
UpperCAmelCase_ : Tuple = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__magic_name__ )
UpperCAmelCase_ : List[Any] = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase_ : str = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase_ : List[str] = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase_ : Any = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCAmelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
snake_case_ : Dict = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
snake_case_ : Tuple = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 644
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Tuple = scope
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : str = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# create attention mask
UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
UpperCAmelCase_ : Any = self.seq_length // 2
UpperCAmelCase_ : Tuple = 0
# first forward pass
UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple()
# create a hypothetical next token and extend to next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase_ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , )
# get two different outputs
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
# create multiple hypothetical next tokens and extend to next_input_ids
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attn_mask
UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ )
model.to(__magic_name__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = BioGptModel(__magic_name__ )
UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__a : Union[str, Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : List[str] = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = BioGptModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : str = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : Tuple = '''left'''
# Define PAD Token = EOS Token
UpperCAmelCase_ : List[Any] = tokenizer.eos_token
UpperCAmelCase_ : List[Any] = model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase_ : Tuple = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ )
UpperCAmelCase_ : Any = model.generate(
input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , )
UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ )
UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings )
UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : Tuple = input_dict['''input_ids''']
UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : Optional[int] = '''multi_label_classification'''
UpperCAmelCase_ : int = input_dict['''input_ids''']
UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
UpperCAmelCase_ : str = model(__magic_name__ )[0]
UpperCAmelCase_ : Optional[int] = 4_23_84
UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __magic_name__ )
UpperCAmelCase_ : List[Any] = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = model.generate(
**__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , )
UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__magic_name__ , __magic_name__ )
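# These @slow integration tests download microsoft/biogpt and can be run with,
# e.g. (a sketch; the test-file name is hypothetical):
#   RUN_SLOW=1 python -m pytest test_modeling_biogpt.py -k "BioGptModelIntegrationTest"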
| 644
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
snake_case_ : Dict = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __a (unittest.TestCase ):
def __init__( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : str=7 , __magic_name__ : int=3 , __magic_name__ : List[Any]=18 , __magic_name__ : List[Any]=30 , __magic_name__ : Union[str, Any]=4_00 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Tuple=True , __magic_name__ : List[str]=True , __magic_name__ : str=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : List[str] = image_size
UpperCAmelCase_ : int = min_resolution
UpperCAmelCase_ : int = max_resolution
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : Tuple = do_normalize
UpperCAmelCase_ : Union[str, Any] = do_convert_rgb
UpperCAmelCase_ : Dict = [5_12, 10_24, 20_48, 40_96]
UpperCAmelCase_ : Any = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
UpperCAmelCase_ : int = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class __a (lowerCamelCase , unittest.TestCase ):
__a : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = PixaStructImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_convert_rgb''' ) )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ : Optional[Any] = 20_48
UpperCAmelCase_ : int = image_processor(__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1E-3 , rtol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
# Initialize image_processor
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
UpperCAmelCase_ : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
UpperCAmelCase_ : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ : Optional[Any] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__magic_name__ ):
UpperCAmelCase_ : int = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
UpperCAmelCase_ : Tuple = '''Hello'''
UpperCAmelCase_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ , header_text=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Tuple = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ , header_text=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
UpperCAmelCase_ : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : int = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processor
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Optional[int] = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class __a (lowerCamelCase , unittest.TestCase ):
__a : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase_ : Optional[int] = 3
@property
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_convert_rgb''' ) )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
# Initialize image_processor
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Tuple = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 644
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __a (lowerCamelCase , unittest.TestCase ):
__a : List[str] = BlenderbotSmallTokenizer
__a : List[Any] = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = '''adapt act apte'''
UpperCAmelCase_ : Tuple = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : List[Any] = '''adapt act apte'''
UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 644
| 1
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __a (lowerCamelCase ):
def __init__( self : int , __magic_name__ : UNetaDModel , __magic_name__ : UNetaDModel , __magic_name__ : DDPMScheduler , __magic_name__ : Union[str, Any] , ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : Optional[int] = value_function
UpperCAmelCase_ : Tuple = unet
UpperCAmelCase_ : List[Any] = scheduler
UpperCAmelCase_ : List[str] = env
UpperCAmelCase_ : Any = env.get_dataset()
UpperCAmelCase_ : str = {}
for key in self.data.keys():
try:
UpperCAmelCase_ : Optional[int] = self.data[key].mean()
except: # noqa: E722
pass
UpperCAmelCase_ : Dict = {}
for key in self.data.keys():
try:
UpperCAmelCase_ : Tuple = self.data[key].std()
except: # noqa: E722
pass
UpperCAmelCase_ : List[str] = env.observation_space.shape[0]
UpperCAmelCase_ : Union[str, Any] = env.action_space.shape[0]
def UpperCAmelCase__ ( self : int , __magic_name__ : str , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase__ ( self : str , __magic_name__ : str ) -> Dict:
"""simple docstring"""
if type(__magic_name__ ) is dict:
return {k: self.to_torch(__magic_name__ ) for k, v in x_in.items()}
elif torch.is_tensor(__magic_name__ ):
return x_in.to(self.unet.device )
return torch.tensor(__magic_name__ , device=self.unet.device )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : Tuple ) -> Tuple:
"""simple docstring"""
for key, val in cond.items():
UpperCAmelCase_ : Union[str, Any] = val.clone()
return x_in
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = x.shape[0]
UpperCAmelCase_ : int = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCAmelCase_ : Union[str, Any] = torch.full((batch_size,) , __magic_name__ , device=self.unet.device , dtype=torch.long )
for _ in range(__magic_name__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCAmelCase_ : List[str] = self.value_function(x.permute(0 , 2 , 1 ) , __magic_name__ ).sample
UpperCAmelCase_ : str = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCAmelCase_ : Union[str, Any] = self.scheduler._get_variance(__magic_name__ )
UpperCAmelCase_ : str = torch.exp(0.5 * posterior_variance )
UpperCAmelCase_ : Union[str, Any] = model_std * grad
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[str] = x.detach()
UpperCAmelCase_ : str = x + scale * grad
UpperCAmelCase_ : int = self.reset_xa(__magic_name__ , __magic_name__ , self.action_dim )
UpperCAmelCase_ : Optional[int] = self.unet(x.permute(0 , 2 , 1 ) , __magic_name__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCAmelCase_ : Dict = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , predict_epsilon=__magic_name__ )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
UpperCAmelCase_ : Any = self.reset_xa(__magic_name__ , __magic_name__ , self.action_dim )
UpperCAmelCase_ : Union[str, Any] = self.to_torch(__magic_name__ )
return x, y
def __call__( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : str=64 , __magic_name__ : Optional[Any]=32 , __magic_name__ : List[str]=2 , __magic_name__ : List[str]=0.1 ) -> int:
"""simple docstring"""
# normalize the observations and create batch dimension
UpperCAmelCase_ : Union[str, Any] = self.normalize(__magic_name__ , '''observations''' )
UpperCAmelCase_ : Dict = obs[None].repeat(__magic_name__ , axis=0 )
UpperCAmelCase_ : List[str] = {0: self.to_torch(__magic_name__ )}
UpperCAmelCase_ : Optional[int] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCAmelCase_ : Optional[int] = randn_tensor(__magic_name__ , device=self.unet.device )
UpperCAmelCase_ : Union[str, Any] = self.reset_xa(__magic_name__ , __magic_name__ , self.action_dim )
UpperCAmelCase_ : Tuple = self.to_torch(__magic_name__ )
# run the diffusion process
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.run_diffusion(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# sort output trajectories by value
UpperCAmelCase_ : List[str] = y.argsort(0 , descending=__magic_name__ ).squeeze()
UpperCAmelCase_ : Optional[Any] = x[sorted_idx]
UpperCAmelCase_ : List[Any] = sorted_values[:, :, : self.action_dim]
UpperCAmelCase_ : Optional[Any] = actions.detach().cpu().numpy()
UpperCAmelCase_ : Union[str, Any] = self.de_normalize(__magic_name__ , key='''actions''' )
# select the action with the highest value
if y is not None:
UpperCAmelCase_ : List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCAmelCase_ : List[Any] = np.random.randint(0 , __magic_name__ )
UpperCAmelCase_ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
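# Hedged usage sketch (added; the names below are assumptions, not part of the
# original file): the pipeline above plans with a trajectory UNet, steers each
# denoising step with the value function's gradient, then returns the
# de-normalized first action of the highest-value trajectory.
#
# pipeline = <this pipeline class>(value_function, unet, scheduler, env)
# obs = env.reset()
# action = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)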
| 644
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = get_activation('''swish''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_activation('''mish''' )
self.assertIsInstance(__magic_name__ , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_activation('''gelu''' )
self.assertIsInstance(__magic_name__ , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 644
| 1
|
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter( year : int ) -> datetime:
metonic_cycle = year % 19
julian_leap_year = year % 4
non_leap_year = year % 7
leap_day_inhibits = math.floor(year / 100 )
lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
leap_day_reinstall_number = leap_day_inhibits / 4
secular_moon_shift = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
days_from_phm_to_sunday = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(year, 4, 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(year, 4, 18 )
else:
return datetime(year, 3, 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
tense = "will be" if year > datetime.now().year else "was"
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
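# Added sanity check (illustration, not part of the original script): Easter 2024
# fell on March 31, which the formula above reproduces.
assert gauss_easter(20_24 ) == datetime(20_24, 3, 31 )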
| 644
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __a (lowerCamelCase ):
__a : Tuple = ["pixel_values"]
def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : Tuple = do_rescale
UpperCAmelCase_ : List[Any] = size_divisor
UpperCAmelCase_ : Any = resample
super().__init__(**__magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase_ : Dict = height // size_divisor * size_divisor
UpperCAmelCase_ : Dict = width // size_divisor * size_divisor
UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
return image
def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase_ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images]
if do_resize:
UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images]
UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
UpperCAmelCase_ : int = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
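# Hedged usage sketch (added; shapes and names are assumptions): with
# size_divisor=32, a 3 x 100 x 130 image is cropped down to 3 x 96 x 128 and
# rescaled to [0, 1].
#
# import numpy as np
# processor = <this image processor class>(size_divisor=32)
# batch = processor([np.zeros((3, 100, 130), dtype=np.uint8)], return_tensors='''np''')
# batch['''pixel_values'''][0].shape  # -> (3, 96, 128)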
| 644
| 1
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch ):
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set() )
@pytest.fixture
def mock_hfh(monkeypatch ):
class MetricMock:
def __init__( self , metric_id ):
self.id = metric_id
class HfhMock:
_metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def list_metrics( self ):
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock() )
@pytest.mark.parametrize(
'''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path ):
if "tmp_path" in args:
args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(FutureWarning, match='''https://huggingface.co/docs/evaluate''' ):
func(*args )
| 644
|
'''simple docstring'''
def solution( max_base : int = 10, max_power : int = 22 ) -> int:
bases = range(1, max_base )
powers = range(1, max_power )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
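# Added check (illustration): Project Euler problem 63's answer is 49.
assert solution(10, 22 ) == 49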
| 644
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
snake_case_ : List[Any] = logging.get_logger(__name__)
def convert_classification( base_model_name, hf_config, downstream_dict ) -> Optional[int]:
model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config )
model.projector.weight.data = downstream_dict['''projector.weight''']
model.projector.bias.data = downstream_dict['''projector.bias''']
model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
return model
def convert_diarization( base_model_name, hf_config, downstream_dict ) -> Optional[int]:
model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config )
model.classifier.weight.data = downstream_dict['''model.linear.weight''']
model.classifier.bias.data = downstream_dict['''model.linear.bias''']
return model
def convert_xvector( base_model_name, hf_config, downstream_dict ) -> Union[str, Any]:
model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config )
model.projector.weight.data = downstream_dict['''connector.weight''']
model.projector.bias.data = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
model.tdnn[i].kernel.weight.data = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
model.tdnn[i].kernel.bias.data = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
model.objective.weight.data = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name, config_path, checkpoint_path, model_dump_path ) -> Union[str, Any]:
checkpoint = torch.load(checkpoint_path, map_location='''cpu''' )
downstream_dict = checkpoint['''Downstream''']
hf_config = UniSpeechSatConfig.from_pretrained(config_path )
hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
base_model_name, return_attention_mask=True, do_normalize=False )
arch = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
hf_model = convert_classification(base_model_name, hf_config, downstream_dict )
elif arch.endswith('''ForAudioFrameClassification''' ):
hf_model = convert_diarization(base_model_name, hf_config, downstream_dict )
elif arch.endswith('''ForXVector''' ):
hf_model = convert_xvector(base_model_name, hf_config, downstream_dict )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(model_dump_path )
hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
snake_case_ : str = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
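# Hedged usage sketch (added; file names are assumptions): invoked as a script, e.g.
# python convert_unispeech_sat.py --base_model_name microsoft/unispeech-sat-base \
#     --config_path config.json --checkpoint_path downstream.ckpt --model_dump_path ./dump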
| 644
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __a (lowerCamelCase ):
__a : int = "dandelin/vilt-b32-finetuned-vqa"
__a : Any = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__a : Any = "image_qa"
__a : str = AutoProcessor
__a : Any = AutoModelForVisualQuestionAnswering
__a : List[Any] = ["image", "text"]
__a : int = ["text"]
def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
return self.model(**__magic_name__ ).logits
def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
idx = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
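# Hedged usage sketch (added; the file name and question are assumptions):
# from PIL import Image
# tool = <this image QA tool>()
# answer = tool(image=Image.open('''photo.jpg'''), question='''How many dogs are there?''')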
| 644
| 1
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix : NDArray[floataa], constant_matrix : NDArray[floataa], init_val : list[int], iterations : int, ) -> list[float]:
rows1 , cols1 = coefficient_matrix.shape
rows2 , cols2 = constant_matrix.shape
if rows1 != cols1:
msg = F"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
raise ValueError(msg )
if cols2 != 1:
msg = F"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
raise ValueError(msg )
if rows1 != rows2:
msg = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
F"""received {rows1}x{cols1} and {rows2}x{cols2}"""
)
raise ValueError(msg )
if len(init_val ) != rows1:
msg = (
'''Number of initial values must be equal to number of rows in coefficient '''
F"""matrix but received {len(init_val )} and {rows1}"""
)
raise ValueError(msg )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
table : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
rows , cols = table.shape
strictly_diagonally_dominant(table )
# Iterates the whole matrix for given number of times
for _ in range(iterations ):
new_val = []
for row in range(rows ):
temp = 0
for col in range(cols ):
if col == row:
denom = table[row][col]
elif col == cols - 1:
val = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
temp = (temp + val) / denom
new_val.append(temp )
init_val = new_val
return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table : NDArray[floataa] ) -> bool:
rows , cols = table.shape
is_diagonally_dominant = True
for i in range(0, rows ):
total = 0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
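# Added illustration: solve the strictly diagonally dominant system
# 4x + y = 5, x + 3y = 4, whose exact solution is x = y = 1.
coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[5.0], [4.0]])
print(jacobi_iteration_method(coefficient, constant, [0, 0], iterations=25 ))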
| 644
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class __a :
def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = value
UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier
UpperCAmelCase_ : Node | None = None
UpperCAmelCase_ : Node | None = None
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class __a :
def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = root
def __str__( self : Any ) -> str:
"""simple docstring"""
return str(self.root )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
UpperCAmelCase_ : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__magic_name__ ): # If it is the right children
UpperCAmelCase_ : Optional[Any] = new_children
else:
UpperCAmelCase_ : Optional[int] = new_children
else:
UpperCAmelCase_ : List[str] = new_children
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool:
"""simple docstring"""
return self.root is None
def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase_ : List[Any] = new_node # set its root
else: # Tree is not empty
UpperCAmelCase_ : str = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase_ : List[Any] = parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase_ : List[Any] = new_node
break
else:
UpperCAmelCase_ : Union[str, Any] = parent_node.right
UpperCAmelCase_ : Union[str, Any] = parent_node
def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None:
"""simple docstring"""
for value in values:
self.__insert(__magic_name__ )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None:
"""simple docstring"""
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
UpperCAmelCase_ : str = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right
return node
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
UpperCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase_ : Any = node.right
return node
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
UpperCAmelCase_ : Optional[int] = self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase_ : Union[str, Any] = self.root
while node.left is not None:
UpperCAmelCase_ : Dict = node.left
return node
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__magic_name__ , __magic_name__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(__magic_name__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__magic_name__ , node.left )
else:
UpperCAmelCase_ : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase_ : Optional[int] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if node:
self.inorder(__magic_name__ , node.left )
arr.append(node.value )
self.inorder(__magic_name__ , node.right )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int:
"""simple docstring"""
UpperCAmelCase_ : list[int] = []
self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]:
UpperCAmelCase_ : Any = []
if curr_node is not None:
UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase_ ( ) -> None:
UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCAmelCase_ : Tuple = BinarySearchTree()
for i in testlist:
t.insert(SCREAMING_SNAKE_CASE__ )
# Prints all the elements of the list in order traversal
print(SCREAMING_SNAKE_CASE__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''', t.get_max().value ) # type: ignore
print('''Min Value: ''', t.get_min().value ) # type: ignore
for i in testlist:
t.remove(SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 644
| 1
|
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCamelCase_ ( vectors : List[Any], noofclusters : Optional[int] ) -> Tuple:
noofclusters = int(noofclusters )
assert noofclusters < len(vectors )
# Find out the dimensionality
dim = len(vectors[0] )
# Will help select random centroids from among the available vectors
vector_indices = list(range(len(vectors ) ) )
shuffle(vector_indices )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
graph = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
centroids = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
]
##These nodes will assign the centroid Variables the appropriate
##values
centroid_value = tf.placeholder('''float64''', [dim] )
cent_assigns = []
for centroid in centroids:
cent_assigns.append(tf.assign(centroid, centroid_value ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
##These nodes will assign an assignment Variable the appropriate
##value
assignment_value = tf.placeholder('''int32''' )
cluster_assigns = []
for assignment in assignments:
cluster_assigns.append(tf.assign(assignment, assignment_value ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
mean_input = tf.placeholder('''float''', [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
mean_op = tf.reduce_mean(mean_input, 0 )
##Node for computing Euclidean distances
# Placeholders for input
va = tf.placeholder('''float''', [dim] )
vb = tf.placeholder('''float''', [dim] )
euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb ), 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
centroid_distances = tf.placeholder('''float''', [noofclusters] )
cluster_assignment = tf.argmin(centroid_distances, 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
init_op = tf.initialize_all_variables()
# Initialize all variables
sess.run(init_op )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
noofiterations = 100
for _ in range(noofiterations ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(vectors ) ):
vect = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
distances = [
sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
assignment = sess.run(
cluster_assignment, feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n], feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(noofclusters ):
# Collect all the vectors assigned to this cluster
assigned_vects = [
vectors[i]
for i in range(len(vectors ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
new_location = sess.run(
mean_op, feed_dict={mean_input: array(assigned_vects )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n], feed_dict={centroid_value: new_location} )
# Return centroids and assignments
centroids = sess.run(centroids )
assignments = sess.run(assignments )
return centroids, assignments
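# Hedged usage sketch (added; assumes a TF1-style runtime, e.g. tf.compat.v1):
# points = [array([0.0, 0.0]), array([0.1, 0.2]), array([5.0, 5.0]), array([5.1, 4.9])]
# centroids, assignments = lowerCamelCase_(points, 2)
# Each entry of `assignments` is the index of the centroid closest to that point.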
| 644
|
'''simple docstring'''
import sys
import turtle
def get_mid( p1 : tuple[float, float], p2 : tuple[float, float] ) -> tuple[float, float]:
return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle( vertex1 : tuple[float, float], vertex2 : tuple[float, float], vertex3 : tuple[float, float], depth : int, ) -> None:
my_pen.up()
my_pen.goto(vertex1[0], vertex1[1] )
my_pen.down()
my_pen.goto(vertex2[0], vertex2[1] )
my_pen.goto(vertex3[0], vertex3[1] )
my_pen.goto(vertex1[0], vertex1[1] )
if depth == 0:
return
triangle(vertex1, get_mid(vertex1, vertex2 ), get_mid(vertex1, vertex3 ), depth - 1 )
triangle(vertex2, get_mid(vertex1, vertex2 ), get_mid(vertex2, vertex3 ), depth - 1 )
triangle(vertex3, get_mid(vertex3, vertex2 ), get_mid(vertex1, vertex3 ), depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
snake_case_ : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
snake_case_ : Tuple = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 644
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
snake_case_ : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
snake_case_ : Tuple = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
snake_case_ : List[Any] = {
"google/electra-small-generator": 5_12,
"google/electra-base-generator": 5_12,
"google/electra-large-generator": 5_12,
"google/electra-small-discriminator": 5_12,
"google/electra-base-discriminator": 5_12,
"google/electra-large-discriminator": 5_12,
}
snake_case_ : int = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class __a (lowerCamelCase ):
__a : Any = VOCAB_FILES_NAMES
__a : int = PRETRAINED_VOCAB_FILES_MAP
__a : Tuple = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : str = ElectraTokenizer
def __init__( self : Optional[Any] , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]="[UNK]" , __magic_name__ : Union[str, Any]="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : Tuple="[CLS]" , __magic_name__ : Union[str, Any]="[MASK]" , __magic_name__ : List[Any]=True , __magic_name__ : Optional[Any]=None , **__magic_name__ : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
UpperCAmelCase_ : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __magic_name__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __magic_name__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __magic_name__ ) != tokenize_chinese_chars
):
UpperCAmelCase_ : List[Any] = getattr(__magic_name__ , normalizer_state.pop('''type''' ) )
UpperCAmelCase_ : Any = do_lower_case
UpperCAmelCase_ : int = strip_accents
UpperCAmelCase_ : Tuple = tokenize_chinese_chars
UpperCAmelCase_ : Optional[Any] = normalizer_class(**__magic_name__ )
UpperCAmelCase_ : Dict = do_lower_case
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : str=None ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : int , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : int , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
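# Hedged usage sketch (added; checkpoint name taken from the maps above):
# tok = <this fast tokenizer class>.from_pretrained('''google/electra-small-discriminator''')
# tok('''hello world''').input_ids  # [CLS] hello world [SEP], token_type_ids all 0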
| 644
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case_ : List[str] = False
class __a (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__magic_name__ )
UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Any = generator.manual_seed(0 )
UpperCAmelCase_ : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077'''
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe.dual_guided(
prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger '''
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe.text_to_image(
prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 644
| 1
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ) -> Tuple:
locka = FileLock(str(tmpdir / '''foo.lock''' ) )
lockb = FileLock(str(tmpdir / '''foo.lock''' ) )
timeout = 0.01
with locka.acquire():
with pytest.raises(Timeout ):
_start = time.time()
lockb.acquire(timeout )
assert time.time() - _start > timeout
def test_long_path( tmpdir ) -> List[Any]:
filename = '''a''' * 1000 + '''.lock'''
locka = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(filename )
assert len(os.path.basename(locka._lock_file ) ) <= 255
lockb = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(Timeout ):
lockb.acquire(0 )
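# Added illustration (assumes the vendored py-filelock semantics): a FileLock is
# reentrant within a process, so nested acquisition of the *same* object succeeds.
def test_reentrant( tmpdir ):
lock = FileLock(str(tmpdir / '''foo.lock''' ) )
with lock.acquire():
with lock.acquire():
assert lock.is_locked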
| 644
|
'''simple docstring'''
snake_case_ : int = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 644
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
snake_case_ : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
snake_case_ : int = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
snake_case_ : Dict = {
"unc-nlp/lxmert-base-uncased": 5_12,
}
snake_case_ : Dict = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class __a (lowerCamelCase ):
__a : List[str] = VOCAB_FILES_NAMES
__a : int = PRETRAINED_VOCAB_FILES_MAP
__a : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
__a : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : List[Any] = LxmertTokenizer
def __init__( self : List[str] , __magic_name__ : Any=None , __magic_name__ : Tuple=None , __magic_name__ : Tuple=True , __magic_name__ : str="[UNK]" , __magic_name__ : Optional[int]="[SEP]" , __magic_name__ : str="[PAD]" , __magic_name__ : Optional[Any]="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Any=True , __magic_name__ : Any=None , **__magic_name__ : List[Any] , ) -> Dict:
"""simple docstring"""
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
UpperCAmelCase_ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __magic_name__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __magic_name__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __magic_name__ ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(__magic_name__ , normalizer_state.pop('''type''' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : Tuple = strip_accents
UpperCAmelCase_ : Tuple = tokenize_chinese_chars
UpperCAmelCase_ : Any = normalizer_class(**__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = do_lower_case
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
| 644
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a (unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet
UpperCAmelCase_ : Dict = KarrasVeScheduler()
UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0]
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = KarrasVeScheduler()
UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 644
| 1
|
'''simple docstring'''
from maths.prime_factors import prime_factors
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Tuple = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE__ )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(SCREAMING_SNAKE_CASE__ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
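# Added illustration (valid for squarefree inputs): 6 = 2 * 3 has an even number
# of prime factors, so the result is 1; 30 = 2 * 3 * 5 has an odd number, giving -1.
assert lowerCamelCase_(6 ) == 1
assert lowerCamelCase_(30 ) == -1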
| 644
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (lowerCamelCase ):
__a : List[Any] = "openai/whisper-base"
__a : Optional[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__a : Any = "transcriber"
__a : str = WhisperProcessor
__a : List[Any] = WhisperForConditionalGeneration
__a : int = ["audio"]
__a : Optional[Any] = ["text"]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return self.model.generate(inputs=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
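# Hedged usage sketch (added; assumes the transformers agents Tool API):
# tool = <this transcriber tool>()
# text = tool(audio)  # `audio`: a 16 kHz waveform array; returns the transcript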
| 644
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Any = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __a (lowerCamelCase ):
__a : List[str] = "vivit"
    def __init__( self , image_size=2_24 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1E-06 , qkv_bias=True , **kwargs , ):
        """simple docstring"""
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
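if __name__ == "__main__":
    # Illustrative check (added): a default config reproduces the shape hyper-parameters
    # declared above (224px frames, 32 frames per clip, 2x16x16 tubelets).
    configuration = VivitConfig()
    print(configuration.image_size , configuration.num_frames , configuration.tubelet_size )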
| 644
|
'''simple docstring'''
def greatest_common_divisor ( a : int, b : int ) -> int:
    return abs(b ) if a == 0 else greatest_common_divisor(b % a, a )
def gcd_by_iterative ( x : int, y : int ) -> int:
    while y: # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
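# Worked example (added for illustration): Euclid's algorithm on (24, 18) steps
# through (18, 6) and (6, 0), so both implementations return 6.
assert greatest_common_divisor(24, 18 ) == 6
assert gcd_by_iterative(24, 18 ) == 6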
def main( ) -> None:
    try:
        nums = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            F"""greatest_common_divisor({num_a}, {num_b}) = """
            F"""{greatest_common_divisor(num_a, num_b )}""" )
        print(F"""By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b )}""" )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''' )
if __name__ == "__main__":
    main()
| 644
| 1
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
snake_case_ : Optional[Any] = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy ( saved_model_path : str, strict : bool, opset : int ) -> None:
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, '''utils''', '''tf_ops''', '''onnx.json''' ) ) as f:
        onnx_opsets = json.load(f )['''opsets''']
    for i in range(1, opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path, '''rb''' ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to a sorted list for deterministic output
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + '''\n'''.join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F"""Found the following incompatible ops for the opset {opset}:""" )
        print(*incompatible_ops, sep='''\n''' )
    else:
        print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
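# Example invocation (added for illustration; the model path is hypothetical):
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict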
| 644
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , scope=None , range_bbox=10_00 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config( self ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ):
        """simple docstring"""
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ):
        """simple docstring"""
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        return True
    def setUp( self ):
        """simple docstring"""
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest (unittest.TestCase ):
    def test_inference_no_head( self ):
        """simple docstring"""
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 7_68] )
        expected_slice = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
| 644
| 1
|
'''simple docstring'''
from __future__ import annotations
class Node :
    def __init__( self , __magic_name__=None ):
        """simple docstring"""
        self.data = __magic_name__
        self.next = None
    def __repr__( self ):
        """simple docstring"""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(F"""{temp.data}""" )
            temp = temp.next
        return "->".join(string_rep )
def make_linked_list ( SCREAMING_SNAKE_CASE__ : list ) -> Node:
    if not SCREAMING_SNAKE_CASE__:
        raise Exception('''The Elements List is empty''' )
    head = Node(SCREAMING_SNAKE_CASE__[0] )
    current = head
    for i in range(1, len(SCREAMING_SNAKE_CASE__ ) ):
        current.next = Node(SCREAMING_SNAKE_CASE__[i] )
        current = current.next
    return head
def print_reverse ( SCREAMING_SNAKE_CASE__ : Node ) -> None:
    if SCREAMING_SNAKE_CASE__ is not None and isinstance(SCREAMING_SNAKE_CASE__, Node ):
        print_reverse(SCREAMING_SNAKE_CASE__.next )
        print(SCREAMING_SNAKE_CASE__.data )
def main( ) -> None:
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print('''Linked List:''' )
    print(linked_list )
    print('''Elements in Reverse:''' )
    print_reverse(linked_list )
if __name__ == "__main__":
main()
| 644
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer (PreTrainedTokenizer ):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        """simple docstring"""
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping( self , text ):
        """simple docstring"""
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text , char_mapping = '''''', []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize('''NFKC''' , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text , token_mapping , offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
@property
    def vocab_size( self ):
"""simple docstring"""
return len(self.vocab )
    def get_vocab( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ) -> Any:
"""simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def clean_text( self , text ):
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
    def _tokenize( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
        """simple docstring"""
        if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('''alpha''' ) is not None:
            alpha = self.sp_model_kwargs.get('''alpha''' )
        if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
            nbest_size = self.sp_model_kwargs.get('''nbest_size''' )
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def convert_ids_to_string( self , ids ):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids )
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def _convert_token_to_id( self , __magic_name__ ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , __magic_name__ ):
"""simple docstring"""
return self.reverse_vocab.get(__magic_name__ , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens( self , offset_mapping_0 , offset_mapping_1=None ):
        """simple docstring"""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0 ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0 ) + 1) + [1] * (len(token_ids_1 ) + 3)
    def is_ch_char( self , char ):
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha( self , char ):
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct( self , char ):
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace( self , char ):
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def load_vocab( self , filepath ):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('''\n''' )
                token_to_idx[token] = int(index )
        return token_to_idx
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + '''\n''' )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , '''sentencepiece.bpe.model''' )
        with open(tokenizer_model_file , '''wb''' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
| 644
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester (unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest (FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
    def test_models_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''distilbert-base-uncased''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest (unittest.TestCase ):
@slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 7_68)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 644
|
'''simple docstring'''
def lowerCamelCase_ ( number : int ) -> str:
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
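    # Worked examples (added for illustration): two's complement of a negative number
    # over the minimal bit width, e.g. -5 -> "0b1011" and -1 -> "0b11".
    assert lowerCamelCase_(-5 ) == "0b1011"
    assert lowerCamelCase_(-1 ) == "0b11"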
| 644
| 1
|
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.0_0_2 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self ):
"""simple docstring"""
return TaConfig.from_pretrained('''google/umt5-base''' )
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        """simple docstring"""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ):
"""simple docstring"""
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ):
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        """simple docstring"""
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        """simple docstring"""
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def create_and_check_model_fp16_forward( self , config , input_dict , ):
        """simple docstring"""
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ):
        """simple docstring"""
        self.model_tester = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def test_export_to_onnx( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=True , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_model_fp16_forward( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs )
    def test_with_head_masking( self ):
        """simple docstring"""
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['''decoder_head_mask'''] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests (unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test( self ):
        """simple docstring"""
        model = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=False , legacy=False )
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text , return_tensors='''pt''' , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
| 644
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest (unittest.TestCase ):
    def test_input_types( self ):
        """simple docstring"""
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def test_check_illegal_input( self ):
        """simple docstring"""
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
    def test_example_progression( self ):
        """simple docstring"""
        constraints = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(constraints )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset( self ):
        """simple docstring"""
        constraints = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(constraints )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
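# Note (added for illustration): with constraints [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]],
# stepping 1 -> 2 -> 4 commits the constraint to the [1, 2, 4, 5] branch, while after
# reset() the shorter [1, 2, 5] branch completes in just three steps.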
| 644
| 1
|
'''simple docstring'''
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
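# A minimal consumption sketch for the version table above. The dict is bound
# to the placeholder name `snake_case_`, so the `deps` alias below is an
# assumption made for readability.
deps = snake_case_

def deps_list(*pkgs):
    # Return the pinned requirement string for each requested package,
    # e.g. deps_list("numpy", "tqdm") -> ["numpy>=1.17", "tqdm>=4.27"]
    return [deps[pkg] for pkg in pkgs]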
| 644
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
snake_case_ : Optional[Any] = df.shape[0]  # number of rows in the dataset
# If you're using some other dataset input the target column
snake_case_ : Any = df.iloc[:, 1:2]
snake_case_ : str = actual_data.values.reshape(len_data, 1)
snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
snake_case_ : List[str] = 10
snake_case_ : Any = 5
snake_case_ : Any = 20
snake_case_ : Tuple = len_data - periods * look_back
snake_case_ : str = actual_data[:division]
snake_case_ : Optional[int] = actual_data[division - look_back :]
snake_case_ ,snake_case_ : Any = [], []
snake_case_ ,snake_case_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case_ : Any = np.array(train_x)
snake_case_ : Optional[Any] = np.array(test_x)
snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])
snake_case_ : List[Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
# Keras ignores `input_shape` on non-first layers, so it is omitted here; the
# second LSTM infers its input shape (look_back, 128) from the layer above.
model.add(LSTM(64))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case_ : Dict = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
snake_case_ : Optional[Any] = model.predict(x_test)
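# Worked sketch of the sliding-window split used above (a hypothetical helper,
# not part of the original script): with look_back=10 and forward_days=5,
# window i uses samples [i, i+10) as input and [i+10, i+15) as target.
def make_windows(series, look_back, forward_days):
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])
        ys.append(series[i + look_back : i + look_back + forward_days])
    return np.array(xs), np.array(ys)

# make_windows(np.arange(20.0), 10, 5)[0].shape == (6, 10)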
| 644
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __a (unittest.TestCase ):
def __init__( self : List[str] , __magic_name__ : int , __magic_name__ : Optional[int]=7 , __magic_name__ : Tuple=3 , __magic_name__ : Optional[Any]=18 , __magic_name__ : Tuple=30 , __magic_name__ : List[str]=4_00 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=None , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[int]=None , __magic_name__ : Tuple=True , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = size if size is not None else {'''shortest_edge''': 20}
UpperCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Any = do_resize
UpperCAmelCase_ : str = size
UpperCAmelCase_ : Any = do_center_crop
UpperCAmelCase_ : Any = crop_size
UpperCAmelCase_ : List[Any] = do_flip_channel_order
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __a (lowerCamelCase , unittest.TestCase ):
__a : int = MobileViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = MobileViTImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_flip_channel_order''' ) )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image_processing
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
# Initialize image_processing
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Dict = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
# Initialize image_processing
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
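# Minimal usage sketch for the processor exercised above, constructed directly
# with the tester's default sizes (illustrative, not part of the test suite):
#
#     import numpy as np
#     from transformers import MobileViTImageProcessor
#
#     processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
#     image = np.zeros((30, 40, 3), dtype=np.uint8)
#     pixel_values = processor(images=image, return_tensors="np").pixel_values
#     # shape (1, 3, 18, 18): short edge resized to 20, then center-cropped to 18x18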
| 644
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
snake_case_ : Dict = "CompVis/stable-diffusion-v1-2"
snake_case_ : Any = "CompVis/stable-diffusion-v1-3"
snake_case_ : str = "CompVis/stable-diffusion-v1-4"
class __a (lowerCamelCase ):
def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__magic_name__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ : int = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
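# Hypothetical usage sketch for the comparison pipeline above; the
# `custom_pipeline` name is an assumption about how this module is registered
# as a diffusers community pipeline:
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     pipe.enable_attention_slicing()
#     result = pipe(prompt="an astronaut riding a horse")
#     # result.images holds one image per checkpoint v1.1-v1.4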
| 644
| 1
|
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> Tuple:
if isinstance(SCREAMING_SNAKE_CASE__, collections.abc.Iterable ):
return SCREAMING_SNAKE_CASE__
return (SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
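# Behavior of the 2-tuple helper above (referenced later as `to_atuple`):
# iterables pass through unchanged, scalars are duplicated, e.g.
#   (224, 224) -> (224, 224)
#   16         -> (16, 16)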
@require_tf
class __a :
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[int]=None , **__magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Optional[int] = TFVisionTextDualEncoderModel(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any]=None , **__magic_name__ : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.get_vision_text_model(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[str] = TFVisionTextDualEncoderModel(vision_model=__magic_name__ , text_model=__magic_name__ )
UpperCAmelCase_ : Dict = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int]=None , **__magic_name__ : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.get_vision_text_model(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase_ : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
UpperCAmelCase_ : Tuple = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : int=None , **__magic_name__ : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.get_vision_text_model(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=__magic_name__ , text_model=__magic_name__ )
UpperCAmelCase_ : List[str] = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : str = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
UpperCAmelCase_ : List[str] = TFVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : str = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : int = after_output[0].numpy()
UpperCAmelCase_ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1E-5 )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any]=None , **__magic_name__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = self.get_vision_text_model(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=__magic_name__ , text_model=__magic_name__ )
UpperCAmelCase_ : Any = model(
input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , output_attentions=__magic_name__ )
UpperCAmelCase_ : Dict = output.vision_model_output.attentions
self.assertEqual(len(__magic_name__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ : Union[str, Any] = to_atuple(vision_model.config.image_size )
UpperCAmelCase_ : List[str] = to_atuple(vision_model.config.patch_size )
UpperCAmelCase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase_ : List[str] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase_ : str = output.text_model_output.attentions
self.assertEqual(len(__magic_name__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
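# Worked example of the seq_len arithmetic above: for a standard ViT with
# image_size=224 and patch_size=16, num_patches = (224 // 16) * (224 // 16)
# = 196, so seq_len = 196 + 1 = 197 once the [CLS] token is added.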
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : np.ndarray , __magic_name__ : float ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = np.abs((a - b) ).max()
self.assertLessEqual(__magic_name__ , __magic_name__ , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.prepare_config_and_inputs()
self.check_save_load(**__magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__magic_name__ )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.get_pretrained_model_and_inputs()
UpperCAmelCase_ : Optional[Any] = model_a(**__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__magic_name__ )
UpperCAmelCase_ : Dict = TFVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = model_a(**__magic_name__ )
UpperCAmelCase_ : str = after_outputs[0].numpy()
UpperCAmelCase_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1E-5 )
@require_tf
class __a (lowerCamelCase , unittest.TestCase ):
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
UpperCAmelCase_ : Dict = 13
UpperCAmelCase_ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
UpperCAmelCase_ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
UpperCAmelCase_ : Tuple = random_attention_mask([batch_size, 4] )
UpperCAmelCase_ : Tuple = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = TFViTModel(__magic_name__ , name='''vision_model''' )
UpperCAmelCase_ : Any = TFBertModel(__magic_name__ , name='''text_model''' )
return vision_model, text_model
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = TFViTModelTester(self )
UpperCAmelCase_ : List[str] = TFBertModelTester(self )
UpperCAmelCase_ : Optional[int] = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : List[Any] = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = vision_config_and_inputs
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __a (lowerCamelCase , unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
UpperCAmelCase_ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
UpperCAmelCase_ : Tuple = 13
UpperCAmelCase_ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
UpperCAmelCase_ : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
UpperCAmelCase_ : Tuple = random_attention_mask([batch_size, 4] )
UpperCAmelCase_ : Optional[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase__ ( self : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : List[str]=None , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.get_vision_text_model(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=__magic_name__ , text_model=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(
input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , output_attentions=__magic_name__ )
UpperCAmelCase_ : Tuple = output.vision_model_output.attentions
self.assertEqual(len(__magic_name__ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase_ : Dict = to_atuple(vision_model.config.image_size )
UpperCAmelCase_ : str = to_atuple(vision_model.config.patch_size )
UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase_ : List[Any] = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase_ : Dict = output.text_model_output.attentions
self.assertEqual(len(__magic_name__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = TFDeiTModel(__magic_name__ , name='''vision_model''' )
UpperCAmelCase_ : Union[str, Any] = TFRobertaModel(__magic_name__ , name='''text_model''' )
return vision_model, text_model
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = TFDeiTModelTester(self )
UpperCAmelCase_ : Optional[Any] = TFRobertaModelTester(self )
UpperCAmelCase_ : List[Any] = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : Optional[int] = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = vision_config_and_inputs
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __a (lowerCamelCase , unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
UpperCAmelCase_ : Union[str, Any] = 13
UpperCAmelCase_ : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
UpperCAmelCase_ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
UpperCAmelCase_ : List[str] = random_attention_mask([batch_size, 4] )
UpperCAmelCase_ : str = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = TFCLIPVisionModel(__magic_name__ , name='''vision_model''' )
UpperCAmelCase_ : str = TFBertModel(__magic_name__ , name='''text_model''' )
return vision_model, text_model
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = TFCLIPVisionModelTester(self )
UpperCAmelCase_ : Optional[Any] = TFBertModelTester(self )
UpperCAmelCase_ : Dict = clip_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : Tuple = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = vision_config_and_inputs
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=__magic_name__ )
UpperCAmelCase_ : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
UpperCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase_ : List[Any] = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__magic_name__ , padding=__magic_name__ , return_tensors='''np''' )
UpperCAmelCase_ : Optional[Any] = model(**__magic_name__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCAmelCase_ : Dict = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __magic_name__ , atol=1E-3 ) )
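# Sanity check on the expected logits above: softmax([1.2284727, 0.3104122])
# ≈ [0.714, 0.286], i.e. the model matches the COCO cat image to the Italian
# cat caption with roughly 71% probability.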
| 644
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
snake_case_ : Optional[int] = 16
snake_case_ : Tuple = 32
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Tuple = datasets.map(
SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE__ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : str = DataLoader(
tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
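# A minimal sketch of the two padding modes chosen above (the tokenizer name
# is illustrative): dynamic "longest" padding varies per batch and would force
# XLA recompilation on TPU, while "max_length" keeps a fixed (batch, 128) shape.
#
#     tok = AutoTokenizer.from_pretrained("bert-base-cased")
#     feats = [tok("short"), tok("a somewhat longer sentence")]
#     tok.pad(feats, padding="longest", return_tensors="pt")                      # width = longest in batch
#     tok.pad(feats, padding="max_length", max_length=128, return_tensors="pt")   # width = 128 always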
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any:
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
# Initialize accelerator
UpperCAmelCase_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : int = config['''lr''']
UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] )
UpperCAmelCase_ : Optional[int] = int(config['''seed'''] )
UpperCAmelCase_ : List[str] = int(config['''batch_size'''] )
UpperCAmelCase_ : Optional[int] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ )
# Instantiate optimizer
UpperCAmelCase_ : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, )
else:
UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 )
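# Rationale for the Dummy* branches above: when the DeepSpeed config file
# already defines an optimizer or scheduler, accelerate expects placeholder
# DummyOptim / DummyScheduler objects so the real ones are instantiated by
# DeepSpeed itself inside accelerator.prepare(...).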
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Union[str, Any] = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' )
UpperCAmelCase_ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCAmelCase_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1
UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint's scheduler's lr:''', lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizer's lr:''', optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : int = {}
for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = outputs.loss
UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : Tuple = F"""epoch_{epoch}"""
UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = accuracy
UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr''']
UpperCAmelCase_ : Tuple = epoch
UpperCAmelCase_ : Dict = overall_step
accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( ) -> List[str]:
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, )
parser.add_argument(
'''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', )
parser.add_argument(
'''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', )
parser.add_argument(
'''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', )
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
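# Typical invocation sketch (the script name and paths are illustrative):
#
#     python checkpointing.py --model_name_or_path bert-base-cased --output_dir ./ckpts --num_epochs 2
#     python checkpointing.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0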
| 644
| 1
|
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : Dict = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
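# Example of how the "*" wildcard in the mapping above is resolved: a fairseq
# key such as "encoder.layers.3.self_attn.k_proj.weight" matches
# "self_attn.k_proj", the layer index "3" is recovered from the name, and the
# target becomes "encoder.layers.3.attention.k_proj" with weight_type "weight".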
snake_case_ : int = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
for attribute in key.split('''.''' ):
UpperCAmelCase_ : str = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
UpperCAmelCase_ : List[str] = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ).shape
else:
UpperCAmelCase_ : str = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase_ : List[Any] = value
elif weight_type == "weight_g":
UpperCAmelCase_ : str = value
elif weight_type == "weight_v":
UpperCAmelCase_ : Tuple = value
elif weight_type == "bias":
UpperCAmelCase_ : str = value
else:
UpperCAmelCase_ : Optional[int] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]:
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Tuple = fairseq_model.state_dict()
UpperCAmelCase_ : Tuple = hf_model.feature_extractor
UpperCAmelCase_ : Union[str, Any] = hf_model.adapter
for name, value in fairseq_dict.items():
UpperCAmelCase_ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, hf_model.config.feat_extract_norm == '''group''', )
UpperCAmelCase_ : List[str] = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase_ : Union[str, Any] = True
if "*" in mapped_key:
UpperCAmelCase_ : List[str] = name.split(SCREAMING_SNAKE_CASE__ )[0].split('''.''' )[-2]
UpperCAmelCase_ : Any = mapped_key.replace('''*''', SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
UpperCAmelCase_ : Dict = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase_ : List[Any] = '''weight_v'''
elif "bias" in name:
UpperCAmelCase_ : Dict = '''bias'''
elif "weight" in name:
UpperCAmelCase_ : Dict = '''weight'''
else:
UpperCAmelCase_ : Optional[int] = None
set_recursively(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
UpperCAmelCase_ : Any = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase_ : Union[str, Any] = name.split('''.''' )
UpperCAmelCase_ : int = int(items[0] )
UpperCAmelCase_ : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase_ : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase_ : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase_ : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase_ : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = full_name.split('''adaptor.''' )[-1]
UpperCAmelCase_ : Union[str, Any] = name.split('''.''' )
if items[1].isdigit():
UpperCAmelCase_ : Union[str, Any] = int(items[1] )
else:
UpperCAmelCase_ : Tuple = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
UpperCAmelCase_ : int = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
UpperCAmelCase_ : Optional[Any] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
UpperCAmelCase_ : List[str] = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
UpperCAmelCase_ : int = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
UpperCAmelCase_ : Tuple = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
UpperCAmelCase_ : List[Any] = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = emb.weight.shape
UpperCAmelCase_ : Any = nn.Linear(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, bias=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[int] = emb.weight.data
return lin_layer
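# A minimal sketch of the embedding-to-linear conversion above (toy sizes; the
# real call passes the decoder's token embedding). An nn.Embedding(vocab, dim)
# weight has shape (vocab, dim), which is exactly the weight shape of
# nn.Linear(dim, vocab, bias=False), so the lm_head can reuse it directly:
#
#     emb = nn.Embedding(10, 4)
#     lm_head = nn.Linear(4, 10, bias=False)
#     lm_head.weight.data = emb.weight.data
#     assert torch.equal(lm_head.weight, emb.weight)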
@torch.no_grad()
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : int, ) -> Any:
UpperCAmelCase_ : Optional[Any] = WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE__, add_adapter=SCREAMING_SNAKE_CASE__, adapter_stride=SCREAMING_SNAKE_CASE__, adapter_kernel_size=SCREAMING_SNAKE_CASE__, use_auth_token=SCREAMING_SNAKE_CASE__, output_hidden_size=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : Optional[Any] = MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
# load model
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
}, )
UpperCAmelCase_ : Any = model[0].eval()
# load feature extractor
UpperCAmelCase_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__, use_auth_token=SCREAMING_SNAKE_CASE__ )
# set weights for wav2vec2 encoder
UpperCAmelCase_ : int = WavaVecaModel(SCREAMING_SNAKE_CASE__ )
recursively_load_weights_wavaveca(model.encoder, SCREAMING_SNAKE_CASE__ )
# load decoder weights
UpperCAmelCase_ : Union[str, Any] = MBartForCausalLM(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=SCREAMING_SNAKE_CASE__ )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCAmelCase_ : int = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE__, decoder=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : int = MBartaaTokenizer(SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[str] = hf_wavavec.config.to_dict()
UpperCAmelCase_ : Tuple = tokenizer.pad_token_id
UpperCAmelCase_ : Optional[int] = tokenizer.bos_token_id
UpperCAmelCase_ : Optional[int] = tokenizer.eos_token_id
UpperCAmelCase_ : int = '''mbart50'''
UpperCAmelCase_ : List[str] = '''wav2vec2'''
UpperCAmelCase_ : Dict = tokenizer.eos_token_id
UpperCAmelCase_ : List[str] = 250004
UpperCAmelCase_ : List[str] = tokenizer.eos_token_id
UpperCAmelCase_ : Dict = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE__ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
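# Hedged usage sketch, kept as comments ("path/to/dump" and `audio_array` are
# placeholders, not real artifacts): once saved, the converted model could be
# reloaded and run roughly like
#
#     model = SpeechEncoderDecoderModel.from_pretrained("path/to/dump")
#     feature_extractor = WavaVecaFeatureExtractor.from_pretrained("path/to/dump")
#     inputs = feature_extractor(audio_array, sampling_rate=16_000, return_tensors="pt")
#     generated_ids = model.generate(inputs.input_values)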
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=10_24, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_00_04, type=int, help="`decoder_start_token_id` of model config")
snake_case_ : int = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 644
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]:
UpperCAmelCase_ : int = []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [nums.copy()]
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : List[Any] = nums.pop(0 )
UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ )
for perm in permutations:
perm.append(SCREAMING_SNAKE_CASE__ )
result.extend(SCREAMING_SNAKE_CASE__ )
nums.append(SCREAMING_SNAKE_CASE__ )
return result
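# Worked example for the pop-and-recurse version above: called with [1, 2, 3] it pops
# each element in turn, permutes the remaining two, and appends the popped element to
# every sub-permutation, so it returns all 3! = 6 orderings, each exactly once.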
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
output.append(nums[:] )
else:
for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start]
backtrack(start + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack
UpperCAmelCase_ : Optional[int] = []
backtrack(0 )
return output
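# The backtracking version permutes in place: it swaps nums[start] with nums[i],
# recurses on start + 1, then swaps back. For [1, 2] the call tree snapshots
# [1, 2] first and [2, 1] after the swap, giving both orderings.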
if __name__ == "__main__":
import doctest
    # print the permutations returned by the second (backtracking) implementation
snake_case_ : Tuple = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 644
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str ) -> str | Literal[False]:
UpperCAmelCase_ : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = list(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Dict = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase_ : Tuple = '''_'''
if count > 1:
return False
else:
return "".join(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[str] ) -> list[str]:
UpperCAmelCase_ : List[str] = []
while True:
UpperCAmelCase_ : Tuple = ['''$'''] * len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Tuple = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
for j in range(i + 1, len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : int = compare_string(binary[i], binary[j] )
if k is False:
UpperCAmelCase_ : Optional[int] = '''*'''
UpperCAmelCase_ : Any = '''*'''
temp.append('''X''' )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return pi
UpperCAmelCase_ : List[Any] = list(set(SCREAMING_SNAKE_CASE__ ) )
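# Worked example for the merging loop above: with 3 variables and minterms {1, 4, 5}
# the binaries are ["001", "100", "101"]; "001" + "101" merge into "_01" and
# "100" + "101" into "10_", neither of which merges further, so the prime implicants
# are ["_01", "10_"].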
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Sequence[int] ) -> list[str]:
UpperCAmelCase_ : Any = []
for minterm in minterms:
UpperCAmelCase_ : Optional[int] = ''''''
for _ in range(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(SCREAMING_SNAKE_CASE__ )
return temp
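# Worked example: with 3 variables, minterm 5 is built least-significant bit first
# (5 % 2 = 1, 2 % 2 = 0, 1 % 2 = 1), giving the string "101".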
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : int ) -> bool:
UpperCAmelCase_ : Dict = list(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[str] = list(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Union[str, Any] = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[list[int]], SCREAMING_SNAKE_CASE__ : list[str] ) -> list[str]:
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : List[Any] = [0] * len(SCREAMING_SNAKE_CASE__ )
for i in range(len(chart[0] ) ):
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Union[str, Any] = -1
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase_ : Optional[int] = j
if count == 1:
UpperCAmelCase_ : str = 1
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : str = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : List[str] = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : Optional[Any] = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase_ : Any = count_n
UpperCAmelCase_ : str = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : List[str] = 0
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[str], SCREAMING_SNAKE_CASE__ : list[str] ) -> list[list[int]]:
UpperCAmelCase_ : Dict = [[0 for x in range(len(SCREAMING_SNAKE_CASE__ ) )] for x in range(len(SCREAMING_SNAKE_CASE__ ) )]
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : Dict = prime_implicants[i].count('''_''' )
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
if is_for_table(prime_implicants[i], binary[j], SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Optional[int] = 1
return chart
def lowerCamelCase_ ( ) -> None:
UpperCAmelCase_ : List[Any] = int(input('''Enter the no. of variables\n''' ) )
UpperCAmelCase_ : Optional[Any] = [
        int(SCREAMING_SNAKE_CASE__ )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
UpperCAmelCase_ : Union[str, Any] = decimal_to_binary(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Dict = check(SCREAMING_SNAKE_CASE__ )
print('''Prime Implicants are:''' )
print(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = prime_implicant_chart(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = selection(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
print('''Essential Prime Implicants are:''' )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 644
|
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
        right -= 1  # because `right` is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
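# The class above is a Fenwick (binary indexed) tree specialised for range-maximum
# queries: get_next/get_prev walk the implicit tree via bit tricks, the update method
# writes arr[index] and pushes the new maximum upward, and the query method returns
# the maximum over the half-open interval [left, right).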
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 1
|
'''simple docstring'''
from math import factorial
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 100 ) -> int:
return sum(int(SCREAMING_SNAKE_CASE__ ) for x in str(factorial(SCREAMING_SNAKE_CASE__ ) ) )
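# Worked example: factorial(10) = 3_628_800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so
# solution(10) returns 27; the default solution(100) gives 648 (Project Euler 20).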
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 644
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Tuple = scope
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : str = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# create attention mask
UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
UpperCAmelCase_ : Any = self.seq_length // 2
UpperCAmelCase_ : Tuple = 0
# first forward pass
UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase_ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , )
# get two different outputs
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ )
model.to(__magic_name__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = BioGptModel(__magic_name__ )
UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : int = config_and_inputs
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__a : Union[str, Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : List[str] = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = BioGptModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : str = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : Tuple = '''left'''
        # Define PAD token = EOS token (pad with the EOS token for left-padded batching)
UpperCAmelCase_ : List[Any] = tokenizer.eos_token
UpperCAmelCase_ : List[Any] = model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase_ : Tuple = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ )
UpperCAmelCase_ : Any = model.generate(
input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , )
UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ )
UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings )
UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : Tuple = input_dict['''input_ids''']
UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : Optional[int] = '''multi_label_classification'''
UpperCAmelCase_ : int = input_dict['''input_ids''']
UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
UpperCAmelCase_ : str = model(__magic_name__ )[0]
UpperCAmelCase_ : Optional[int] = 4_23_84
UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __magic_name__ )
UpperCAmelCase_ : List[Any] = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = model.generate(
**__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , )
UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__magic_name__ , __magic_name__ )
| 644
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase )
class __a (lowerCamelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__a : str = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
__a : ClassVar[Features] = Features({"text": Value("string" )} )
__a : ClassVar[Features] = Features({"labels": ClassLabel} )
__a : str = "text"
__a : str = "labels"
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , __magic_name__ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
UpperCAmelCase_ : Any = copy.deepcopy(self )
UpperCAmelCase_ : List[str] = self.label_schema.copy()
UpperCAmelCase_ : Optional[Any] = features[self.label_column]
UpperCAmelCase_ : List[str] = label_schema
return task_template
@property
def UpperCAmelCase__ ( self : List[str] ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
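# Sketch of the intended flow, using the default column names above: aligning the
# template with a Features object whose "labels" column is a ClassLabel returns a
# frozen copy whose label_schema carries that ClassLabel, so the dataset's class
# names survive JSON (de)serialization of the task template.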
| 644
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __a (lowerCamelCase , unittest.TestCase ):
__a : List[str] = BlenderbotSmallTokenizer
__a : List[Any] = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = '''adapt act apte'''
UpperCAmelCase_ : Tuple = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : List[Any] = '''adapt act apte'''
UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 644
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __a (lowerCamelCase ):
__a : Dict = "wav2vec2"
def __init__( self : int , __magic_name__ : List[str]=32 , __magic_name__ : int=7_68 , __magic_name__ : Tuple=12 , __magic_name__ : Tuple=12 , __magic_name__ : Any=30_72 , __magic_name__ : List[Any]="gelu" , __magic_name__ : int=0.1 , __magic_name__ : str=0.1 , __magic_name__ : List[Any]=0.1 , __magic_name__ : Any=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Union[str, Any]=0.0_2 , __magic_name__ : List[str]=1E-5 , __magic_name__ : Any="group" , __magic_name__ : Dict="gelu" , __magic_name__ : List[str]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __magic_name__ : List[Any]=(5, 2, 2, 2, 2, 2, 2) , __magic_name__ : List[Any]=(10, 3, 3, 3, 3, 2, 2) , __magic_name__ : Optional[Any]=False , __magic_name__ : Any=1_28 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Dict=False , __magic_name__ : List[Any]=True , __magic_name__ : Union[str, Any]=0.0_5 , __magic_name__ : List[str]=10 , __magic_name__ : int=2 , __magic_name__ : Any=0.0 , __magic_name__ : Optional[int]=10 , __magic_name__ : Optional[int]=0 , __magic_name__ : List[str]=3_20 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Dict=0.1 , __magic_name__ : Any=1_00 , __magic_name__ : List[str]=2_56 , __magic_name__ : Dict=2_56 , __magic_name__ : Tuple=0.1 , __magic_name__ : List[str]="sum" , __magic_name__ : List[str]=False , __magic_name__ : Union[str, Any]=False , __magic_name__ : Tuple=2_56 , __magic_name__ : List[str]=(5_12, 5_12, 5_12, 5_12, 15_00) , __magic_name__ : Union[str, Any]=(5, 3, 3, 1, 1) , __magic_name__ : Tuple=(1, 2, 3, 1, 1) , __magic_name__ : Union[str, Any]=5_12 , __magic_name__ : int=0 , __magic_name__ : Optional[Any]=1 , __magic_name__ : Tuple=2 , __magic_name__ : str=False , __magic_name__ : List[Any]=3 , __magic_name__ : Tuple=2 , __magic_name__ : int=3 , __magic_name__ : Tuple=None , __magic_name__ : Union[str, Any]=None , **__magic_name__ : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(**__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ )
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Any = feat_extract_activation
UpperCAmelCase_ : List[str] = list(__magic_name__ )
UpperCAmelCase_ : str = list(__magic_name__ )
UpperCAmelCase_ : List[Any] = list(__magic_name__ )
UpperCAmelCase_ : List[str] = conv_bias
UpperCAmelCase_ : Dict = num_conv_pos_embeddings
UpperCAmelCase_ : List[str] = num_conv_pos_embedding_groups
UpperCAmelCase_ : Any = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : Dict = attention_dropout
UpperCAmelCase_ : str = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Union[str, Any] = final_dropout
UpperCAmelCase_ : int = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : List[str] = do_stable_layer_norm
UpperCAmelCase_ : str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : str = apply_spec_augment
UpperCAmelCase_ : Tuple = mask_time_prob
UpperCAmelCase_ : List[Any] = mask_time_length
UpperCAmelCase_ : Dict = mask_time_min_masks
UpperCAmelCase_ : Optional[Any] = mask_feature_prob
UpperCAmelCase_ : Tuple = mask_feature_length
UpperCAmelCase_ : str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : List[Any] = num_codevectors_per_group
UpperCAmelCase_ : List[Any] = num_codevector_groups
UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature
UpperCAmelCase_ : Optional[int] = feat_quantizer_dropout
UpperCAmelCase_ : List[Any] = num_negatives
UpperCAmelCase_ : Optional[int] = codevector_dim
UpperCAmelCase_ : Optional[int] = proj_codevector_dim
UpperCAmelCase_ : Tuple = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Dict = ctc_loss_reduction
UpperCAmelCase_ : List[str] = ctc_zero_infinity
# adapter
UpperCAmelCase_ : Optional[Any] = add_adapter
UpperCAmelCase_ : Any = adapter_kernel_size
UpperCAmelCase_ : Optional[int] = adapter_stride
UpperCAmelCase_ : Optional[Any] = num_adapter_layers
UpperCAmelCase_ : Union[str, Any] = output_hidden_size or hidden_size
UpperCAmelCase_ : Any = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Tuple = list(__magic_name__ )
UpperCAmelCase_ : List[Any] = list(__magic_name__ )
UpperCAmelCase_ : Dict = list(__magic_name__ )
UpperCAmelCase_ : int = xvector_output_dim
@property
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
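    # With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above returns
    # 5 * 2**6 = 320: each encoder frame covers 320 raw samples, i.e. 20 ms of 16 kHz audio.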
| 644
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = get_activation('''swish''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_activation('''mish''' )
self.assertIsInstance(__magic_name__ , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_activation('''gelu''' )
self.assertIsInstance(__magic_name__ , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 644
| 1
|
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
return getitem, k
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
return setitem, k, v
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
return delitem, k
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Tuple, *SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
try:
return fun(SCREAMING_SNAKE_CASE__, *SCREAMING_SNAKE_CASE__ ), None
except Exception as e:
return None, e
snake_case_ : Optional[Any] = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
snake_case_ : Optional[int] = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
snake_case_ : Dict = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
snake_case_ : List[str] = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
snake_case_ : int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
snake_case_ : int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
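# Each operation list above is replayed against both the custom HashMap and a plain
# dict in the parametrized test below; the assertions require that return values,
# string representation, key set, length, and items of the two containers stay in sync.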
@pytest.mark.parametrize(
'''operations''', (
pytest.param(_add_items, id='''add items''' ),
pytest.param(_overwrite_items, id='''overwrite items''' ),
pytest.param(_delete_items, id='''delete items''' ),
pytest.param(_access_absent_items, id='''access absent items''' ),
pytest.param(_add_with_resize_up, id='''add with resize up''' ),
pytest.param(_add_with_resize_down, id='''add with resize down''' ),
), )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
UpperCAmelCase_ : Tuple = HashMap(initial_block_size=4 )
UpperCAmelCase_ : Dict = {}
for _, (fun, *args) in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = _run_operation(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, *SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = _run_operation(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, *SCREAMING_SNAKE_CASE__ )
assert my_res == py_res
assert str(SCREAMING_SNAKE_CASE__ ) == str(SCREAMING_SNAKE_CASE__ )
assert set(SCREAMING_SNAKE_CASE__ ) == set(SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
assert set(my.items() ) == set(py.items() )
def lowerCamelCase_ ( ) -> int:
def is_public(SCREAMING_SNAKE_CASE__ : str ) -> bool:
return not name.startswith('''_''' )
UpperCAmelCase_ : Optional[int] = {name for name in dir({} ) if is_public(SCREAMING_SNAKE_CASE__ )}
UpperCAmelCase_ : Dict = {name for name in dir(HashMap() ) if is_public(SCREAMING_SNAKE_CASE__ )}
assert dict_public_names > hash_public_names
| 644
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __a (lowerCamelCase ):
__a : Tuple = ["pixel_values"]
def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : Tuple = do_rescale
UpperCAmelCase_ : List[Any] = size_divisor
UpperCAmelCase_ : Any = resample
super().__init__(**__magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase_ : Dict = height // size_divisor * size_divisor
UpperCAmelCase_ : Dict = width // size_divisor * size_divisor
UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
return image
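    # Example of the rounding above: with size_divisor=32, a 333 x 513 input comes out
    # as 320 x 512 (both sides rounded down to the nearest multiple of 32).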
def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase_ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images]
if do_resize:
UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images]
UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
UpperCAmelCase_ : int = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 644
| 1
|
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
        right -= 1  # because `right` is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 10, SCREAMING_SNAKE_CASE__ : int = 22 ) -> int:
UpperCAmelCase_ : Optional[int] = range(1, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = range(1, SCREAMING_SNAKE_CASE__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
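# Worked example: 59049 = 9**5 has exactly 5 digits, so it counts, while 16384 = 2**14
# has only 5 digits against a power of 14 and does not; the default solution(10, 22)
# evaluates to 49 (Project Euler 63).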
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 644
| 1
|
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = R'''\w+[.]\d+'''
UpperCAmelCase_ : Tuple = re.findall(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
for pat in pats:
UpperCAmelCase_ : Optional[int] = key.replace(SCREAMING_SNAKE_CASE__, '''_'''.join(pat.split('''.''' ) ) )
return key
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Dict ) -> str:
UpperCAmelCase_ : List[str] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCAmelCase_ : int = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCAmelCase_ : Dict = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCAmelCase_ : str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase_ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCAmelCase_ : Union[str, Any] = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase_ : Tuple = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
UpperCAmelCase_ : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase_ : Dict = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase_ : int = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
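# Example of the conv rename above: a PyTorch conv weight in (out, in, kH, kW) layout,
# say (64, 3, 3, 3), becomes a Flax "kernel" in (kH, kW, in, out) layout, (3, 3, 3, 64),
# via the (2, 3, 1, 0) transpose; plain linear weights are simply transposed.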
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Dict=42 ) -> List[str]:
# Step 1: Convert pytorch tensor to numpy
UpperCAmelCase_ : int = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCAmelCase_ : Optional[int] = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase_ : Any = flatten_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_ : List[str] = rename_key(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[int] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add unexpected weights so that a warning is raised when loading
UpperCAmelCase_ : Dict = jnp.asarray(SCREAMING_SNAKE_CASE__ )
return unflatten_dict(SCREAMING_SNAKE_CASE__ )
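# --- Illustrative sketch (not part of the record above) ---
# A minimal, numpy-only demonstration of the two tensor-layout rules applied
# above when porting PyTorch weights to Flax: 4-D conv kernels move from
# (out, in, kh, kw) to (kh, kw, in, out), and 2-D linear weights are
# transposed. The shapes below are made-up examples.
import numpy as np

pt_conv = np.zeros((8, 3, 5, 5))            # PyTorch Conv2d weight: (out, in, kh, kw)
flax_conv = pt_conv.transpose(2, 3, 1, 0)   # Flax Conv kernel: (kh, kw, in, out)
assert flax_conv.shape == (5, 5, 3, 8)

pt_linear = np.zeros((16, 32))              # PyTorch Linear weight: (out, in)
flax_linear = pt_linear.T                   # Flax Dense kernel: (in, out)
assert flax_linear.shape == (32, 16)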
| 644
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __a (lowerCamelCase ):
__a : int = "dandelin/vilt-b32-finetuned-vqa"
__a : Any = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__a : Any = "image_qa"
__a : str = AutoProcessor
__a : Any = AutoModelForVisualQuestionAnswering
__a : List[Any] = ["image", "text"]
__a : int = ["text"]
def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
return self.model(**__magic_name__ ).logits
def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
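# --- Illustrative sketch (not part of the record above) ---
# The decode step above is just argmax over the answer logits followed by an
# id -> label lookup; a self-contained stand-in with made-up numbers:
import torch

logits = torch.tensor([[0.2, 3.1, -0.4]])   # pretend (1, num_answers) VQA logits
id2label = {0: "no", 1: "2", 2: "yes"}      # pretend answer vocabulary
assert id2label[logits.argmax(-1).item()] == "2"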
| 644
| 1
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class __a :
def __init__( self : Tuple , __magic_name__ : list[tuple[float, float]] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
UpperCAmelCase_ : List[str] = len(__magic_name__ ) - 1
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : float ) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCAmelCase_ : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , __magic_name__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__magic_name__ ) , 5 ) == 1
return output_values
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : float ) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCAmelCase_ : Any = self.basis_function(__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = 0.0
UpperCAmelCase_ : Tuple = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : float = 0.0_1 ) -> List[str]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
UpperCAmelCase_ : list[float] = [] # x coordinates of points to plot
UpperCAmelCase_ : list[float] = [] # y coordinates of points to plot
UpperCAmelCase_ : int = 0.0
while t <= 1:
UpperCAmelCase_ : str = self.bezier_curve_function(__magic_name__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
UpperCAmelCase_ : Any = [i[0] for i in self.list_of_points]
UpperCAmelCase_ : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
__magic_name__ , __magic_name__ , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(__magic_name__ , __magic_name__ , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
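# --- Illustrative sketch (not part of the record above) ---
# Quick numeric check of the Bernstein basis used by the class above: for
# degree n the i-th basis function is C(n, i) * (1 - t)**(n - i) * t**i, and
# the values always sum to 1 (partition of unity).
from scipy.special import comb  # type: ignore

n, t = 2, 0.5
basis = [comb(n, i) * (1 - t) ** (n - i) * t**i for i in range(n + 1)]
assert basis == [0.25, 0.5, 0.25]   # quadratic basis at t = 0.5
assert round(sum(basis), 5) == 1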
| 644
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class __a :
def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = value
UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier
UpperCAmelCase_ : Node | None = None
UpperCAmelCase_ : Node | None = None
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class __a :
def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = root
def __str__( self : Any ) -> str:
"""simple docstring"""
return str(self.root )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
UpperCAmelCase_ : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__magic_name__ ): # If it is the right children
UpperCAmelCase_ : Optional[Any] = new_children
else:
UpperCAmelCase_ : Optional[int] = new_children
else:
UpperCAmelCase_ : List[str] = new_children
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool:
"""simple docstring"""
return self.root is None
def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase_ : List[Any] = new_node # set its root
else: # Tree is not empty
UpperCAmelCase_ : str = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase_ : List[Any] = parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase_ : List[Any] = new_node
break
else:
UpperCAmelCase_ : Union[str, Any] = parent_node.right
UpperCAmelCase_ : Union[str, Any] = parent_node
def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None:
"""simple docstring"""
for value in values:
self.__insert(__magic_name__ )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None:
"""simple docstring"""
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please insert values first.''' )
else:
UpperCAmelCase_ : str = self.root
            # use lazy evaluation here to avoid an AttributeError on None
while node is not None and node.value is not value:
UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right
return node
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
UpperCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase_ : Any = node.right
return node
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
UpperCAmelCase_ : Optional[int] = self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase_ : Union[str, Any] = self.root
while node.left is not None:
UpperCAmelCase_ : Dict = node.left
return node
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__magic_name__ , __magic_name__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(__magic_name__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__magic_name__ , node.left )
else:
UpperCAmelCase_ : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase_ : Optional[int] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if node:
self.inorder(__magic_name__ , node.left )
arr.append(node.value )
self.inorder(__magic_name__ , node.right )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int:
"""simple docstring"""
UpperCAmelCase_ : list[int] = []
self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]:
UpperCAmelCase_ : Any = []
if curr_node is not None:
UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase_ ( ) -> None:
UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCAmelCase_ : Tuple = BinarySearchTree()
for i in testlist:
t.insert(SCREAMING_SNAKE_CASE__ )
# Prints all the elements of the list in order traversal
print(SCREAMING_SNAKE_CASE__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''', t.get_max().value ) # type: ignore
print('''Min Value: ''', t.get_min().value ) # type: ignore
for i in testlist:
t.remove(SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
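# --- Illustrative sketch (not part of the record above) ---
# Self-contained check of the invariant the tree above relies on: inserting
# values (left child if smaller, right child otherwise) and reading them back
# with an inorder walk yields sorted order.
def bst_insert(node, value):
    if node is None:
        return {"value": value, "left": None, "right": None}
    side = "left" if value < node["value"] else "right"
    node[side] = bst_insert(node[side], value)
    return node

def bst_inorder(node):
    if node is None:
        return []
    return bst_inorder(node["left"]) + [node["value"]] + bst_inorder(node["right"])

root = None
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    root = bst_insert(root, v)
assert bst_inorder(root) == sorted((8, 3, 6, 1, 10, 14, 13, 4, 7))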
| 644
| 1
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : str ) -> Any:
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
UpperCAmelCase_ : List[Any] = mf_knapsack(i - 1, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase_ : Tuple = max(
mf_knapsack(i - 1, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), mf_knapsack(i - 1, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, j - wt[i - 1] ) + val[i - 1], )
UpperCAmelCase_ : Tuple = val
return f[i][j]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
UpperCAmelCase_ : Optional[Any] = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1, n + 1 ):
for w_ in range(1, w + 1 ):
if wt[i - 1] <= w_:
UpperCAmelCase_ : List[str] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_] )
else:
UpperCAmelCase_ : List[str] = dp[i - 1][w_]
return dp[n][w_], dp
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : list, SCREAMING_SNAKE_CASE__ : list ) -> Optional[int]:
if not (isinstance(SCREAMING_SNAKE_CASE__, (list, tuple) ) and isinstance(SCREAMING_SNAKE_CASE__, (list, tuple) )):
raise ValueError(
'''Both the weights and values vectors must be either lists or tuples''' )
UpperCAmelCase_ : List[Any] = len(SCREAMING_SNAKE_CASE__ )
if num_items != len(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Union[str, Any] = (
'''The number of weights must be the same as the number of values.\n'''
F"""But got {num_items} weights and {len(SCREAMING_SNAKE_CASE__ )} values"""
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ ):
if not isinstance(wt[i], SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Union[str, Any] = (
'''All weights must be integers but got weight of '''
F"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = knapsack(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : set = set()
_construct_solution(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
return optimal_val, example_optional_set
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list, SCREAMING_SNAKE_CASE__ : list, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : set ) -> Tuple:
    # For the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i - 1, j),
    # where i - 1 means considering only the previous items at the given maximum weight.
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, i - 1, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
else:
optimal_set.add(SCREAMING_SNAKE_CASE__ )
_construct_solution(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, i - 1, j - wt[i - 1], SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
snake_case_ : List[str] = [3, 2, 4, 4]
snake_case_ : Optional[int] = [4, 3, 2, 3]
snake_case_ : List[Any] = 4
snake_case_ : Union[str, Any] = 6
snake_case_ : Dict = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
snake_case_ ,snake_case_ : Tuple = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
snake_case_ ,snake_case_ : Any = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 644
|
'''simple docstring'''
import sys
import turtle
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float] ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : int, ) -> None:
my_pen.up()
my_pen.goto(vertexa[0], vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0], vertexa[1] )
my_pen.goto(vertexa[0], vertexa[1] )
my_pen.goto(vertexa[0], vertexa[1] )
if depth == 0:
return
triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
snake_case_ : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
snake_case_ : Tuple = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
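# --- Illustrative sketch (not part of the record above) ---
# The recursion above needs exactly one geometric primitive: the midpoint of
# two vertices. A standalone check with this file's base-triangle corners:
def midpoint(p1, p2):
    return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)

assert midpoint((-175, -125), (175, -125)) == (0.0, -125.0)
# Each level of recursion replaces one triangle with three half-size corner
# triangles, so a drawing of depth d is built from 3**d smallest triangles.
assert 3**4 == 81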
| 644
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : float, SCREAMING_SNAKE_CASE__ : float, SCREAMING_SNAKE_CASE__ : float, ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
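# --- Illustrative sketch (not part of the record above) ---
# The function above is a direct application of the mass-action law
# n * p = n_i**2: given any two of the three concentrations, the third follows.
# The numbers below are made up for illustration (roughly silicon-like, cm^-3).
n_i = 1.5e10            # intrinsic carrier concentration
n = 1.0e16              # electron concentration after n-type doping
p = n_i**2 / n          # implied hole concentration
assert abs(n * p - n_i**2) <= 1e-6 * n_i**2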
| 644
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case_ : List[str] = False
class __a (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__magic_name__ )
UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Any = generator.manual_seed(0 )
UpperCAmelCase_ : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077'''
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe.dual_guided(
prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger '''
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe.text_to_image(
prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 644
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __a (lowerCamelCase , unittest.TestCase ):
__a : List[str] = BlenderbotSmallTokenizer
__a : List[Any] = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = '''adapt act apte'''
UpperCAmelCase_ : Tuple = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : List[Any] = '''adapt act apte'''
UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 644
|
'''simple docstring'''
snake_case_ : int = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
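# --- Illustrative sketch (not part of the record above) ---
# A table like this is normally consumed by setup.py through a small lookup
# helper; a hypothetical, self-contained version of that pattern:
deps = {"torch": "torch>=1.4", "accelerate": "accelerate>=0.11.0"}

def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]

assert deps_list("torch", "accelerate") == ["torch>=1.4", "accelerate>=0.11.0"]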
| 644
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a (lowerCamelCase , unittest.TestCase ):
__a : Optional[Any] = MgpstrTokenizer
__a : str = False
__a : Union[str, Any] = {}
__a : Any = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
# fmt: off
UpperCAmelCase_ : Union[str, Any] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCAmelCase_ : Dict = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = '''tester'''
UpperCAmelCase_ : Any = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase_ : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
UpperCAmelCase_ : List[Any] = tokenizer.encode([special_token] , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
UpperCAmelCase_ : int = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_input_output_texts(__magic_name__ )
UpperCAmelCase_ : int = tokenizer.tokenize(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
UpperCAmelCase_ : Any = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertNotEqual(len(__magic_name__ ) , 0 )
UpperCAmelCase_ : Dict = tokenizer.decode(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , __magic_name__ )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
pass
| 644
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a (unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet
UpperCAmelCase_ : Dict = KarrasVeScheduler()
UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0]
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = KarrasVeScheduler()
UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 644
| 1
|
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
snake_case_ : Dict = logging.get_logger(__name__)
# General docstring
snake_case_ : Optional[int] = "PoolFormerConfig"
# Base docstring
snake_case_ : List[str] = "sail/poolformer_s12"
snake_case_ : Dict = [1, 5_12, 7, 7]
# Image classification docstring
snake_case_ : Tuple = "sail/poolformer_s12"
snake_case_ : Optional[int] = "tabby, tabby cat"
snake_case_ : Any = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : float = 0.0, SCREAMING_SNAKE_CASE__ : bool = False ) -> Dict:
if drop_prob == 0.0 or not training:
return input
UpperCAmelCase_ : List[str] = 1 - drop_prob
UpperCAmelCase_ : List[str] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
UpperCAmelCase_ : List[str] = keep_prob + torch.rand(SCREAMING_SNAKE_CASE__, dtype=input.dtype, device=input.device )
random_tensor.floor_() # binarize
UpperCAmelCase_ : List[Any] = input.div(SCREAMING_SNAKE_CASE__ ) * random_tensor
return output
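# --- Illustrative sketch (not part of the module above) ---
# Stochastic depth on concrete numbers: with drop_prob = 0.25, each sample in
# the batch is independently zeroed or rescaled by 1 / keep_prob, so the
# output equals the input in expectation.
import torch

torch.manual_seed(0)
sample = torch.ones(8, 3, 4, 4)
keep_prob = 0.75
mask = (keep_prob + torch.rand(8, 1, 1, 1)).floor()   # 1 with prob 0.75, else 0
dropped = sample / keep_prob * mask
assert all(abs(v) < 1e-6 or abs(v - 1 / keep_prob) < 1e-6 for v in dropped.unique().tolist())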
class __a (nn.Module ):
def __init__( self : Optional[Any] , __magic_name__ : Optional[float] = None ) -> None:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : List[str] = drop_prob
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
return drop_path(__magic_name__ , self.drop_prob , self.training )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return "p={}".format(self.drop_prob )
class __a (nn.Module ):
def __init__( self : Tuple , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int=None ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : List[str] = patch_size if isinstance(__magic_name__ , collections.abc.Iterable ) else (patch_size, patch_size)
UpperCAmelCase_ : Optional[Any] = stride if isinstance(__magic_name__ , collections.abc.Iterable ) else (stride, stride)
UpperCAmelCase_ : List[str] = padding if isinstance(__magic_name__ , collections.abc.Iterable ) else (padding, padding)
UpperCAmelCase_ : List[str] = nn.Convad(__magic_name__ , __magic_name__ , kernel_size=__magic_name__ , stride=__magic_name__ , padding=__magic_name__ )
UpperCAmelCase_ : List[str] = norm_layer(__magic_name__ ) if norm_layer else nn.Identity()
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Dict ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.projection(__magic_name__ )
UpperCAmelCase_ : str = self.norm(__magic_name__ )
return embeddings
class __a (nn.GroupNorm ):
def __init__( self : List[str] , __magic_name__ : Optional[Any] , **__magic_name__ : int ) -> Dict:
"""simple docstring"""
super().__init__(1 , __magic_name__ , **__magic_name__ )
class __a (nn.Module ):
def __init__( self : Any , __magic_name__ : List[Any] ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : str = nn.AvgPoolad(__magic_name__ , stride=1 , padding=pool_size // 2 , count_include_pad=__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.pool(__magic_name__ ) - hidden_states
class __a (nn.Module ):
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : Union[str, Any] = nn.Convad(__magic_name__ , __magic_name__ , 1 )
UpperCAmelCase_ : int = nn.Convad(__magic_name__ , __magic_name__ , 1 )
UpperCAmelCase_ : Tuple = PoolFormerDropPath(__magic_name__ )
if isinstance(config.hidden_act , __magic_name__ ):
UpperCAmelCase_ : Tuple = ACTaFN[config.hidden_act]
else:
UpperCAmelCase_ : Dict = config.hidden_act
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.conva(__magic_name__ )
UpperCAmelCase_ : int = self.act_fn(__magic_name__ )
UpperCAmelCase_ : List[str] = self.drop(__magic_name__ )
UpperCAmelCase_ : Optional[int] = self.conva(__magic_name__ )
UpperCAmelCase_ : int = self.drop(__magic_name__ )
return hidden_states
class __a (nn.Module ):
def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : List[Any] = PoolFormerPooling(__magic_name__ )
UpperCAmelCase_ : Optional[int] = PoolFormerOutput(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = PoolFormerGroupNorm(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = PoolFormerGroupNorm(__magic_name__ )
# Useful for training neural nets
UpperCAmelCase_ : Any = PoolFormerDropPath(__magic_name__ ) if drop_path > 0.0 else nn.Identity()
UpperCAmelCase_ : Optional[Any] = config.use_layer_scale
if config.use_layer_scale:
UpperCAmelCase_ : str = nn.Parameter(
config.layer_scale_init_value * torch.ones((__magic_name__) ) , requires_grad=__magic_name__ )
UpperCAmelCase_ : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((__magic_name__) ) , requires_grad=__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Tuple ) -> Any:
"""simple docstring"""
if self.use_layer_scale:
UpperCAmelCase_ : Dict = self.pooling(self.before_norm(__magic_name__ ) )
UpperCAmelCase_ : Optional[int] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
UpperCAmelCase_ : List[Any] = hidden_states + self.drop_path(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = ()
UpperCAmelCase_ : Optional[int] = self.output(self.after_norm(__magic_name__ ) )
UpperCAmelCase_ : Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
UpperCAmelCase_ : Union[str, Any] = hidden_states + self.drop_path(__magic_name__ )
UpperCAmelCase_ : Any = (output,) + outputs
return outputs
else:
UpperCAmelCase_ : List[Any] = self.drop_path(self.pooling(self.before_norm(__magic_name__ ) ) )
# First residual connection
UpperCAmelCase_ : List[str] = pooling_output + hidden_states
UpperCAmelCase_ : Optional[Any] = ()
# Second residual connection inside the PoolFormerOutput block
UpperCAmelCase_ : Union[str, Any] = self.drop_path(self.output(self.after_norm(__magic_name__ ) ) )
UpperCAmelCase_ : List[str] = hidden_states + layer_output
UpperCAmelCase_ : int = (output,) + outputs
return outputs
class __a (nn.Module ):
def __init__( self : Tuple , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : int = config
# stochastic depth decay rule
UpperCAmelCase_ : Dict = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
UpperCAmelCase_ : str = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
UpperCAmelCase_ : Tuple = nn.ModuleList(__magic_name__ )
# Transformer blocks
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Dict = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
UpperCAmelCase_ : Dict = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__magic_name__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__magic_name__ ) )
UpperCAmelCase_ : Optional[int] = nn.ModuleList(__magic_name__ )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : List[str]=False , __magic_name__ : Tuple=True ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = () if output_hidden_states else None
UpperCAmelCase_ : Any = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = layers
# Get patch embeddings from hidden_states
UpperCAmelCase_ : Optional[int] = embedding_layer(__magic_name__ )
# Send the embeddings through the blocks
for _, blk in enumerate(__magic_name__ ):
UpperCAmelCase_ : Optional[int] = blk(__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = layer_outputs[0]
if output_hidden_states:
UpperCAmelCase_ : List[Any] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )
class __a (lowerCamelCase ):
__a : Optional[Any] = PoolFormerConfig
__a : str = "poolformer"
__a : Tuple = "pixel_values"
__a : List[Any] = True
def UpperCAmelCase__ ( self : int , __magic_name__ : List[str] ) -> Any:
"""simple docstring"""
if isinstance(__magic_name__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__magic_name__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Any:
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : Tuple = value
snake_case_ : int = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
snake_case_ : Optional[Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , lowerCamelCase , )
class __a (lowerCamelCase ):
def __init__( self : int , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__(__magic_name__ )
UpperCAmelCase_ : str = config
UpperCAmelCase_ : Optional[Any] = PoolFormerEncoder(__magic_name__ )
# Initialize weights and apply final processing
self.post_init()
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
UpperCAmelCase_ : Dict = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__magic_name__ , hidden_states=encoder_outputs.hidden_states , )
class __a (nn.Module ):
def __init__( self : Dict , __magic_name__ : List[Any] ) -> List[Any]:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : str = nn.Linear(config.hidden_size , config.hidden_size )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.dense(__magic_name__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , lowerCamelCase , )
class __a (lowerCamelCase ):
def __init__( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any:
"""simple docstring"""
super().__init__(__magic_name__ )
UpperCAmelCase_ : Optional[int] = config.num_labels
UpperCAmelCase_ : Optional[Any] = PoolFormerModel(__magic_name__ )
# Final norm
UpperCAmelCase_ : List[Any] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
UpperCAmelCase_ : Tuple = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[torch.LongTensor] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Tuple = self.poolformer(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , )
UpperCAmelCase_ : Dict = outputs[0]
UpperCAmelCase_ : List[Any] = self.classifier(self.norm(__magic_name__ ).mean([-2, -1] ) )
UpperCAmelCase_ : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase_ : Dict = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase_ : Any = '''single_label_classification'''
else:
UpperCAmelCase_ : Tuple = '''multi_label_classification'''
if self.config.problem_type == "regression":
UpperCAmelCase_ : Optional[Any] = MSELoss()
if self.num_labels == 1:
UpperCAmelCase_ : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCAmelCase_ : List[Any] = loss_fct(__magic_name__ , __magic_name__ )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase_ : str = CrossEntropyLoss()
UpperCAmelCase_ : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase_ : int = BCEWithLogitsLoss()
UpperCAmelCase_ : Tuple = loss_fct(__magic_name__ , __magic_name__ )
if not return_dict:
UpperCAmelCase_ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
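# --- Illustrative sketch (not part of the module above) ---
# PoolFormer's token mixer (the pooling class above) is just average pooling
# minus the identity. With stride 1, padding pool_size // 2 and
# count_include_pad=False the spatial size is preserved, and a constant input
# maps to zero -- the mixer only reacts to local variation.
import torch
from torch import nn

pool = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
features = torch.full((1, 2, 5, 5), 7.0)
assert torch.allclose(pool(features) - features, torch.zeros_like(features))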
| 644
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (lowerCamelCase ):
__a : List[Any] = "openai/whisper-base"
__a : Optional[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__a : Any = "transcriber"
__a : str = WhisperProcessor
__a : List[Any] = WhisperForConditionalGeneration
__a : int = ["audio"]
__a : Optional[Any] = ["text"]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return self.model.generate(inputs=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
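# --- Illustrative sketch (not part of the record above) ---
# Hypothetical end-to-end use of the transcription tool above; the variable
# names and the 16 kHz mono numpy audio input are assumptions. The three
# methods map onto the usual encode -> generate -> decode ASR pipeline:
# tool = SpeechToTextTool()              # wraps openai/whisper-base
# features = tool.encode(audio)          # log-mel input_features
# tokens = tool.forward(features)        # autoregressive token ids
# text = tool.decode(tokens)             # final transcript string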
| 644
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Optional[Any] = StableUnCLIPImgaImgPipeline
__a : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__a : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a : List[str] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__a : str = frozenset([] )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 32
UpperCAmelCase_ : Optional[int] = embedder_hidden_size
# image encoding components
UpperCAmelCase_ : Tuple = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
UpperCAmelCase_ : Any = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__magic_name__ , projection_dim=__magic_name__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase_ : Any = StableUnCLIPImageNormalizer(embedding_dim=__magic_name__ )
UpperCAmelCase_ : List[str] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__magic_name__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__magic_name__ , layers_per_block=1 , upcast_attention=__magic_name__ , use_linear_projection=__magic_name__ , )
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=__magic_name__ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = AutoencoderKL()
UpperCAmelCase_ : Any = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Any=0 , __magic_name__ : List[str]=True ) -> List[Any]:
"""simple docstring"""
if str(__magic_name__ ).startswith('''mps''' ):
UpperCAmelCase_ : str = torch.manual_seed(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
if pil_image:
UpperCAmelCase_ : Any = input_image * 0.5 + 0.5
UpperCAmelCase_ : List[str] = input_image.clamp(0 , 1 )
UpperCAmelCase_ : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ : Tuple = DiffusionPipeline.numpy_to_pil(__magic_name__ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = StableUnCLIPImgaImgPipeline(**__magic_name__ )
UpperCAmelCase_ : Tuple = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(__magic_name__ )
inputs.update({'''image_embeds''': None} )
UpperCAmelCase_ : Tuple = sd_pipe(**__magic_name__ ).images
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Any = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Dict = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__magic_name__ )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__magic_name__ )
@slow
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
UpperCAmelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
UpperCAmelCase_ : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase_ : Tuple = pipe(__magic_name__ , '''anime turle''' , generator=__magic_name__ , output_type='''np''' )
UpperCAmelCase_ : List[Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
UpperCAmelCase_ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
UpperCAmelCase_ : Any = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe(__magic_name__ , '''anime turle''' , generator=__magic_name__ , output_type='''np''' )
UpperCAmelCase_ : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ : Dict = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
UpperCAmelCase_ : Tuple = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ : Dict = pipe(
__magic_name__ , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase_ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 644
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
return abs(SCREAMING_SNAKE_CASE__ ) if a == 0 else greatest_common_divisor(b % a, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
while y: # --> when y=0 then loop will terminate and return x as final GCD.
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = y, x % y
return abs(SCREAMING_SNAKE_CASE__ )
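# Illustrative sanity check (added for this write-up, not part of the original
# module): both implementations agree on sample pairs, e.g.
#   greatest_common_divisor(24, 40) == gcd_by_iterative(24, 40) == 8
#   greatest_common_divisor(0, 7) == gcd_by_iterative(0, 7) == 7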
def lowerCamelCase_ ( ) -> Optional[int]:
try:
UpperCAmelCase_ : Optional[Any] = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
UpperCAmelCase_ : Optional[int] = int(nums[0] )
UpperCAmelCase_ : List[Any] = int(nums[1] )
print(
F"""greatest_common_divisor({num_a}, {num_a}) = """
F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
| 644
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["PerceiverFeatureExtractor"]
snake_case_ : int = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 644
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : List[str] = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = range_bbox
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : List[str] = bbox[i, j, 3]
UpperCAmelCase_ : Dict = bbox[i, j, 1]
UpperCAmelCase_ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : List[str] = bbox[i, j, 2]
UpperCAmelCase_ : Tuple = bbox[i, j, 0]
UpperCAmelCase_ : Union[str, Any] = t
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : Tuple = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : Any = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Union[str, Any] = False
__a : int = False
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str:
"""simple docstring"""
return True
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = LiltModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : Tuple = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@slow
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ )
UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ )
UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ )
UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] )
UpperCAmelCase_ : List[str] = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , )
self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
| 644
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        # For consistency across the different places where the DisjunctiveConstraint is
        # called, dc.token_ids is a list of integers. It is also initialized only by integers.
UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(__magic_name__ ) # fails here
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 )
UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 )
UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 644
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = "▁"
snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
snake_case_ : int = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
snake_case_ : Optional[Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
snake_case_ : Dict = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
snake_case_ : Any = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (lowerCamelCase ):
__a : List[str] = ["input_ids"]
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None:
"""simple docstring"""
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = do_lower_case
UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt
UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
        # to mimic the behavior of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ )
else:
UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any:
"""simple docstring"""
if text is None:
return None
UpperCAmelCase_ : str = self.tokenize(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', []
for i, ch in enumerate(__magic_name__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ )
if self.is_whitespace(__magic_name__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__magic_name__ ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase_ : Tuple = token[1:]
UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset
UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase_ : int = end
return token_mapping
@property
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : Optional[Any] = None
return state
def __setstate__( self : str , __magic_name__ : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCAmelCase_ : Dict = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ )
else:
UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = []
for pi, piece in enumerate(__magic_name__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0:
new_pieces.append(__magic_name__ )
continue
else:
continue
UpperCAmelCase_ : List[str] = 0
for i, chunk in enumerate(__magic_name__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__magic_name__ )
UpperCAmelCase_ : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : str = i
if len(__magic_name__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(__magic_name__ , self.unk_token )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
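    # Example (added for clarity): a single sequence is packed as [CLS] X [SEP];
    # a pair is packed as [CLS] A [SEP] [SEP] B [SEP], i.e. ERNIE-M uses a
    # doubled separator between the two sequences.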
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__magic_name__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3)
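    # Example (added for clarity): for token_ids_a = [5, 6] and token_ids_b = [7]
    # this returns [0] * 3 + [1] * 4 == [0, 0, 0, 1, 1, 1, 1], covering the
    # 7 tokens of the [CLS] A [SEP] [SEP] B [SEP] layout.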
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__magic_name__ ) == 1:
UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ )
if cat == "Zs":
return True
return False
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {}
with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__magic_name__ ):
UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' )
UpperCAmelCase_ : Dict = int(__magic_name__ )
return token_to_idx
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0
if os.path.isdir(__magic_name__ ):
UpperCAmelCase_ : Any = os.path.join(
__magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCAmelCase_ : Dict = token_index
writer.write(token + '''\n''' )
index += 1
UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' )
with open(__magic_name__ , '''wb''' ) as fi:
UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (vocab_file,)
| 644
| 1
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 10, SCREAMING_SNAKE_CASE__ : int = 22 ) -> int:
UpperCAmelCase_ : Optional[int] = range(1, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = range(1, SCREAMING_SNAKE_CASE__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
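# Worked example (added for clarity): 16807 = 7**5 has exactly 5 digits and
# 134217728 = 8**9 has exactly 9 digits, so both are counted by solution().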
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 644
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str:
if number > 0:
raise ValueError('''input must be a negative integer''' )
UpperCAmelCase_ : Union[str, Any] = len(bin(SCREAMING_SNAKE_CASE__ )[3:] )
UpperCAmelCase_ : Union[str, Any] = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase_ : Optional[Any] = (
(
'''1'''
+ '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 1
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
snake_case_ : List[str] = logging.getLogger(__name__)
snake_case_ : Any = "Hello world! cécé herlolip"
snake_case_ : List[str] = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Dict ) -> Dict:
UpperCAmelCase_ : List[Any] = BertAbsConfig(
temp_dir='''.''', finetune_bert=SCREAMING_SNAKE_CASE__, large=SCREAMING_SNAKE_CASE__, share_emb=SCREAMING_SNAKE_CASE__, use_bert_emb=SCREAMING_SNAKE_CASE__, encoder='''bert''', max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, )
UpperCAmelCase_ : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__, lambda SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : storage )
UpperCAmelCase_ : List[str] = AbsSummarizer(SCREAMING_SNAKE_CASE__, torch.device('''cpu''' ), SCREAMING_SNAKE_CASE__ )
original.eval()
UpperCAmelCase_ : Tuple = BertAbsSummarizer(SCREAMING_SNAKE_CASE__, torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
UpperCAmelCase_ : str = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
UpperCAmelCase_ : Dict = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(SCREAMING_SNAKE_CASE__ )) )
UpperCAmelCase_ : List[str] = torch.tensor(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(SCREAMING_SNAKE_CASE__ )) )
UpperCAmelCase_ : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase_ : Optional[int] = encoder_input_ids
UpperCAmelCase_ : int = decoder_input_ids
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase_ : Any = original(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )[0]
UpperCAmelCase_ : Tuple = original.generator(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[int] = new_model(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )[0]
UpperCAmelCase_ : Any = new_model.generator(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between outputs: {:.2f}'''.format(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase_ : Tuple = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase_ : List[Any] = torch.allclose(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict(), '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
snake_case_ : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 644
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        # For consistency across the different places where the DisjunctiveConstraint is
        # called, dc.token_ids is a list of integers. It is also initialized only by integers.
UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(__magic_name__ ) # fails here
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 )
UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 )
UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 644
| 1
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list ) -> list:
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return lst
UpperCAmelCase_ : Optional[int] = 1
while i < len(SCREAMING_SNAKE_CASE__ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = lst[i], lst[i - 1]
i -= 1
if i == 0:
UpperCAmelCase_ : Any = 1
return lst
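# Illustrative trace (added for clarity): gnome_sort([3, 1, 2]) swaps 3 and 1
# to get [1, 3, 2], steps back to the start, advances, then swaps 3 and 2 to
# get [1, 2, 3]; the loop ends once i walks past the last index.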
if __name__ == "__main__":
snake_case_ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
snake_case_ : str = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 644
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
snake_case_ : Optional[Any] = df.shape[:1][0]
    # If you're using some other dataset, input the target column here
snake_case_ : Any = df.iloc[:, 1:2]
snake_case_ : str = actual_data.values.reshape(len_data, 1)
snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
snake_case_ : List[str] = 10
snake_case_ : Any = 5
snake_case_ : Any = 20
snake_case_ : Tuple = len_data - periods * look_back
snake_case_ : str = actual_data[:division]
snake_case_ : Optional[int] = actual_data[division - look_back :]
snake_case_ ,snake_case_ : Any = [], []
snake_case_ ,snake_case_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case_ : Any = np.array(train_x)
snake_case_ : Optional[Any] = np.array(test_x)
snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])
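    # Shape note (added for clarity): with look_back = 10 and forward_days = 5,
    # x_train has shape (num_windows, 10, 1) and y_train has shape
    # (num_windows, 5): each sample maps a 10-step history to the next 5 values.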
snake_case_ : List[Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case_ : Dict = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
snake_case_ : Optional[Any] = model.predict(x_test)
| 644
| 1
|
'''simple docstring'''
import math
class __a :
    def __init__( self : Any , __magic_name__ : str=0 ) -> Union[str, Any]: # a graph with nodes 0, 1, ..., N-1
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = n
UpperCAmelCase_ : int = [
[math.inf for j in range(0 , __magic_name__ )] for i in range(0 , __magic_name__ )
] # adjacency matrix for weight
UpperCAmelCase_ : Dict = [
[math.inf for j in range(0 , __magic_name__ )] for i in range(0 , __magic_name__ )
] # dp[i][j] stores minimum distance from i to j
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = w
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
UpperCAmelCase_ : Optional[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] , __magic_name__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.dp[u][v]
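# Note (added for clarity): floyd_warshall() relaxes
# dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]) for every intermediate node k,
# computing all-pairs shortest paths in O(n**3) time.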
if __name__ == "__main__":
snake_case_ : Any = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 644
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
snake_case_ : Dict = "CompVis/stable-diffusion-v1-2"
snake_case_ : Any = "CompVis/stable-diffusion-v1-3"
snake_case_ : str = "CompVis/stable-diffusion-v1-4"
class __a (lowerCamelCase ):
def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
"""simple docstring"""
        super().__init__()
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__magic_name__ )
        # Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ : int = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
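# Usage sketch (illustrative, not part of the original file): with `pipe` an instance
# of the comparison pipeline above, a single call produces one result per Stable
# Diffusion v1.x checkpoint for the same prompt and generation settings:
#   output = pipe(prompt="A fantasy landscape", num_inference_steps=25)
#   images_v1_1, images_v1_2, images_v1_3, images_v1_4 = output.images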
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    """Build the GLUE/MRPC train and eval dataloaders for the given tokenizer checkpoint."""
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Evaluate the model over the whole dataloader and return the accuracy."""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
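# Illustrative example of the trimming above (numbers are assumptions): with 10 eval
# samples on 4 processes and a per-process batch size of 1, accelerator.gather returns
# 4 predictions per step; after two full steps samples_seen == 8, so on the final step
# only 10 - 8 = 2 of the 4 gathered predictions are real and the slice drops the
# duplicated tail that the sharded dataloader padded in.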
def training_function(config, args):
    """Train on GLUE/MRPC, checkpointing every epoch and validating any resumed state."""
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
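# Example invocation (illustrative; the config-file and script names are assumptions):
#   accelerate launch --config_file deepspeed_config.yaml this_script.py --num_epochs 2 --output_dir ckpts
#   accelerate launch --config_file deepspeed_config.yaml this_script.py --resume_from_checkpoint ckpts/epoch_0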
"""Zeller's congruence: find the day of the week for a Gregorian calendar date."""
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return a sentence naming the weekday of a date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # Maps datetime.weekday() (Monday == 0) onto Zeller's numbering (Sunday == 0)
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if len(date_input) != 10:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(y, m, d)

    # Start math: January and February count as months 13 and 14 of the previous year
    if m <= 2:
        y = y - 1
        m = m + 12
    # c is the century part of the year, k the year within the century
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math against the standard library
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    return f"Your date {date_input}, is a {days[str(f)]}!"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    print(zeller(args.date_input))
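# Example (illustrative):
#   zeller("01-31-2010")  # -> "Your date 01-31-2010, is a Sunday!"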
"""Return all permutations of a list, by rotation (permute) and by in-place backtracking (permute2)."""


def permute(nums: list[int]) -> list[list[int]]:
    """Rotate the first element out, permute the remainder, then re-append it."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Generate permutations by swapping elements in place and undoing each swap."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
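# Both functions enumerate all n! orderings of the input; for example,
# permute2([1, 2, 3]) returns
#   [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
# (the two implementations may emit the permutations in different orders).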
"""Flax building blocks for the 2D UNet: down blocks, up blocks, and the cross-attention mid block."""
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    """Down block of `num_layers` (resnet, transformer) pairs with an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    """Down block of plain resnets (no attention) with an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    """Up block of (resnet, transformer) pairs that consumes skip connections, with an optional upsampler."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUpBlock2D(nn.Module):
    """Up block of plain resnets that consumes skip connections, with an optional upsampler."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """Mid block: a resnet, then `num_layers` (transformer, resnet) pairs at constant width."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
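# Shape sketch (illustrative): in Flax's NHWC layout a down block maps
# (batch, h, w, in_channels) -> (batch, h / 2, w / 2, out_channels) when
# add_downsample=True, and every intermediate hidden state it collects in
# output_states is later consumed in reverse order by the matching up block
# through res_hidden_states_tuple.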
"""Fenwick tree (binary indexed tree) variant that answers range-maximum queries."""


class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size  # the underlying values
        self.tree = [0] * size  # maxima over the ranges [get_prev(i) + 1, i]

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value and repair every tree node whose range covers it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # this node covers a single cell
                self.tree[index] = value
            else:
                # recompute the maximum of the covered range so that updates
                # which decrease a value stay correct
                self.tree[index] = max(self.arr[index], self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return the maximum of arr[left:right] (right is exclusive)."""
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
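# Usage sketch (illustrative):
#   ft = MaxFenwickTree(5)
#   ft.update(1, 20)
#   ft.update(4, 10)
#   ft.query(0, 5)  # -> 20 (maximum of arr[0:5])
#   ft.query(2, 5)  # -> 10 (index 1 lies outside the range)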
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with the given provider (CPUExecutionProvider by default)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and any external weights) into `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
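# Usage sketch (illustrative; the model id and the input name are assumptions):
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))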
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# create attention mask
UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
UpperCAmelCase_ : Any = self.seq_length // 2
UpperCAmelCase_ : Tuple = 0
# first forward pass
UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase_ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , )
# get two different outputs
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ )
model.to(__magic_name__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = BioGptModel(__magic_name__ )
UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
__a : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__a : Union[str, Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : List[str] = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : str = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : Tuple = '''left'''
# Define PAD Token = EOS Token = 50256
UpperCAmelCase_ : List[Any] = tokenizer.eos_token
UpperCAmelCase_ : List[Any] = model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase_ : Tuple = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ )
UpperCAmelCase_ : Any = model.generate(
input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , )
UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ )
UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings )
UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : Tuple = input_dict['''input_ids''']
UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : Optional[int] = '''multi_label_classification'''
UpperCAmelCase_ : int = input_dict['''input_ids''']
UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
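# The integration tests above hit the real microsoft/biogpt checkpoint and are gated
# behind transformers' RUN_SLOW flag (illustrative invocation; adjust the test path
# to your checkout):
#   RUN_SLOW=1 python -m pytest tests/models/biogpt/test_modeling_biogpt.py -k Integration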
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
snake_case_ : Dict = "CompVis/stable-diffusion-v1-2"
snake_case_ : Any = "CompVis/stable-diffusion-v1-3"
snake_case_ : str = "CompVis/stable-diffusion-v1-4"
class __a (lowerCamelCase ):
def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__magic_name__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ : int = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
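# A minimal usage sketch for the comparison pipeline above, kept as comments
# because the class is anonymized as `__a` in this dump; the readable name
# `StableDiffusionComparisonPipeline` and the call shape are assumptions:
#   pipe = StableDiffusionComparisonPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   out = pipe("an astronaut riding a horse", num_inference_steps=25)
#   v1_1_img, v1_2_img, v1_3_img, v1_4_img = out.images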
| 644
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __a (lowerCamelCase , unittest.TestCase ):
__a : List[str] = BlenderbotSmallTokenizer
__a : List[Any] = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = '''adapt act apte'''
UpperCAmelCase_ : Tuple = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : List[Any] = '''adapt act apte'''
UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
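# Note on the assertions above: BlenderbotSmall BPE marks word-internal
# continuation with "@@", so "apte" splits into ["ap@@", "te"] and the ids
# line up one-to-one with the toy vocab defined in setUp.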
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 644
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __a (lowerCamelCase ):
__a : int = "dandelin/vilt-b32-finetuned-vqa"
__a : Any = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__a : Any = "image_qa"
__a : str = AutoProcessor
__a : Any = AutoModelForVisualQuestionAnswering
__a : List[Any] = ["image", "text"]
__a : int = ["text"]
def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
return self.model(**__magic_name__ ).logits
def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
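# Hedged usage sketch for the VQA tool above (tools are callable per the
# transformers agents API; the readable class name ImageQuestionAnsweringTool
# is an assumption, since the class is anonymized as `__a` here):
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(image=pil_image, question="What is on the table?")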
| 644
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = get_activation('''swish''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_activation('''mish''' )
self.assertIsInstance(__magic_name__ , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_activation('''gelu''' )
self.assertIsInstance(__magic_name__ , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
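# Note: "swish" and "silu" intentionally resolve to the same nn.SiLU module,
# since silu(x) = x * sigmoid(x); the boundary assertions check that
# silu(0) = 0 and that silu(x) -> x once sigmoid(x) saturates for large x.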
| 644
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : List[str] = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
snake_case_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
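# The _LazyModule wiring above means `from transformers.models.electra import
# ElectraModel` defers the heavy torch/tf/flax imports until first attribute
# access; the TYPE_CHECKING branch only exists so static type checkers and
# IDEs still resolve the real symbols.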
| 644
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __a (lowerCamelCase ):
__a : Tuple = ["pixel_values"]
def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : Tuple = do_rescale
UpperCAmelCase_ : List[Any] = size_divisor
UpperCAmelCase_ : Any = resample
super().__init__(**__magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase_ : Dict = height // size_divisor * size_divisor
UpperCAmelCase_ : Dict = width // size_divisor * size_divisor
UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
return image
def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase_ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images]
if do_resize:
UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images]
UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
UpperCAmelCase_ : int = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
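# Standalone check of the size_divisor rounding used by the resize step above
# (each spatial dimension is floored to the nearest multiple of size_divisor):
size_divisor = 32
for (h, w), expected in [((4_80, 6_40), (4_80, 6_40)), ((5_00, 6_50), (4_80, 6_40))]:
    assert (h // size_divisor * size_divisor, w // size_divisor * size_divisor) == expected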
| 644
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase_ ( ) -> str:
UpperCAmelCase_ : Optional[Any] = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Tuple = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=SCREAMING_SNAKE_CASE__ )
env_command_parser(subparsers=SCREAMING_SNAKE_CASE__ )
launch_command_parser(subparsers=SCREAMING_SNAKE_CASE__ )
tpu_command_parser(subparsers=SCREAMING_SNAKE_CASE__ )
test_command_parser(subparsers=SCREAMING_SNAKE_CASE__ )
# Let's go
UpperCAmelCase_ : List[Any] = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE__, '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(SCREAMING_SNAKE_CASE__ )
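# For reference: this parser backs the `accelerate` console entry point, so a
# shell invocation such as `accelerate launch --num_processes 2 train.py` is
# routed to the matching sub-parser registered above and executed via
# args.func(args).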
if __name__ == "__main__":
main()
| 644
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 10, SCREAMING_SNAKE_CASE__ : int = 22 ) -> int:
UpperCAmelCase_ : Optional[int] = range(1, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = range(1, SCREAMING_SNAKE_CASE__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
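# Worked check of the counting rule above: 16807 = 7**5 has exactly 5 digits,
# so it is a 5-digit fifth power and is counted, while any base >= 10 never
# qualifies because 10**p already has p + 1 digits.
assert len(str(7**5 ) ) == 5
assert all(len(str(10**p ) ) == p + 1 for p in range(1, 22 ))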
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 644
| 1
|
'''simple docstring'''
from collections.abc import Generator
def lowerCamelCase_ ( ) -> Generator[int, None, None]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = 0, 1
while True:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = b, a + b
yield b
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 1000 ) -> int:
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : Optional[Any] = fibonacci_generator()
while len(str(next(SCREAMING_SNAKE_CASE__ ) ) ) < n:
answer += 1
return answer + 1
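# Sanity check (the classic Project Euler 25 pattern): F(12) = 1_44 is the
# first Fibonacci number with three digits, so solution(3) == 12. Kept as a
# comment because the def above is anonymized; the __main__ block below calls
# it by its original name, `solution`.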
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 644
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __a (lowerCamelCase ):
__a : int = "dandelin/vilt-b32-finetuned-vqa"
__a : Any = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__a : Any = "image_qa"
__a : str = AutoProcessor
__a : Any = AutoModelForVisualQuestionAnswering
__a : List[Any] = ["image", "text"]
__a : int = ["text"]
def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
return self.model(**__magic_name__ ).logits
def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 644
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class __a :
def __init__( self : List[Any] , __magic_name__ : list[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : list[dict] = []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(__magic_name__ )
self.set_fail_transitions()
def UpperCAmelCase__ ( self : str , __magic_name__ : int , __magic_name__ : str ) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : str ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Tuple = 0
for character in keyword:
UpperCAmelCase_ : List[Any] = self.find_next_state(__magic_name__ , __magic_name__ )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
UpperCAmelCase_ : Optional[Any] = len(self.adlist ) - 1
else:
UpperCAmelCase_ : Optional[Any] = next_state
self.adlist[current_state]["output"].append(__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> None:
"""simple docstring"""
UpperCAmelCase_ : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(__magic_name__ )
UpperCAmelCase_ : Any = 0
while q:
UpperCAmelCase_ : str = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(__magic_name__ )
UpperCAmelCase_ : str = self.adlist[r]['''fail_state''']
while (
self.find_next_state(__magic_name__ , self.adlist[child]['''value'''] ) is None
and state != 0
):
UpperCAmelCase_ : Tuple = self.adlist[state]['''fail_state''']
UpperCAmelCase_ : Union[str, Any] = self.find_next_state(
__magic_name__ , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Optional[Any] = (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : str ) -> dict[str, list[int]]:
"""simple docstring"""
UpperCAmelCase_ : dict = {} # returns a dict with keywords and lists of their occurrences
UpperCAmelCase_ : Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
while (
self.find_next_state(__magic_name__ , string[i] ) is None
and current_state != 0
):
UpperCAmelCase_ : Tuple = self.adlist[current_state]['''fail_state''']
UpperCAmelCase_ : List[Any] = self.find_next_state(__magic_name__ , string[i] )
if next_state is None:
UpperCAmelCase_ : List[Any] = 0
else:
UpperCAmelCase_ : Optional[int] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
UpperCAmelCase_ : Any = []
result[key].append(i - len(__magic_name__ ) + 1 )
return result
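# Usage sketch with the original TheAlgorithms names (the class is anonymized
# as `__a` and this search method as `UpperCAmelCase__` in this dump):
#   automaton = Automaton(["what", "hat", "ver", "er"])
#   automaton.search_in("whatever, err ... , wherever?")
#   -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}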
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class __a :
def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = value
UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier
UpperCAmelCase_ : Node | None = None
UpperCAmelCase_ : Node | None = None
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class __a :
def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = root
def __str__( self : Any ) -> str:
"""simple docstring"""
return str(self.root )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
UpperCAmelCase_ : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__magic_name__ ): # If it is the right children
UpperCAmelCase_ : Optional[Any] = new_children
else:
UpperCAmelCase_ : Optional[int] = new_children
else:
UpperCAmelCase_ : List[str] = new_children
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool:
"""simple docstring"""
return self.root is None
def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase_ : List[Any] = new_node # set its root
else: # Tree is not empty
UpperCAmelCase_ : str = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase_ : List[Any] = parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase_ : List[Any] = new_node
break
else:
UpperCAmelCase_ : Union[str, Any] = parent_node.right
UpperCAmelCase_ : Union[str, Any] = parent_node
def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None:
"""simple docstring"""
for value in values:
self.__insert(__magic_name__ )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None:
"""simple docstring"""
if self.empty():
raise IndexError('''Warning: Tree is empty! Please use another.''' )
else:
UpperCAmelCase_ : str = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right
return node
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
UpperCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase_ : Any = node.right
return node
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
UpperCAmelCase_ : Optional[int] = self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase_ : Union[str, Any] = self.root
while node.left is not None:
UpperCAmelCase_ : Dict = node.left
return node
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__magic_name__ , __magic_name__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(__magic_name__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__magic_name__ , node.left )
else:
UpperCAmelCase_ : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase_ : Optional[int] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if node:
self.inorder(__magic_name__ , node.left )
arr.append(node.value )
self.inorder(__magic_name__ , node.right )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int:
"""simple docstring"""
UpperCAmelCase_ : list[int] = []
self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal
return arr[k - 1]
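# Note on the method above: an inorder traversal of a binary search tree
# visits values in ascending order, so once `arr` is filled the k-th smallest
# element is simply arr[k - 1].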
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]:
UpperCAmelCase_ : Any = []
if curr_node is not None:
UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase_ ( ) -> None:
UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCAmelCase_ : Tuple = BinarySearchTree()
for i in testlist:
t.insert(SCREAMING_SNAKE_CASE__ )
# Prints all the elements of the list in order traversal
print(SCREAMING_SNAKE_CASE__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''', t.get_max().value ) # type: ignore
print('''Min Value: ''', t.get_min().value ) # type: ignore
for i in testlist:
t.remove(SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 644
| 1
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
snake_case_ : Optional[Any] = df.shape[:1][0]
# If you're using some other dataset, input the target column
snake_case_ : Any = df.iloc[:, 1:2]
snake_case_ : str = actual_data.values.reshape(len_data, 1)
snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
snake_case_ : List[str] = 10
snake_case_ : Any = 5
snake_case_ : Any = 20
snake_case_ : Tuple = len_data - periods * look_back
snake_case_ : str = actual_data[:division]
snake_case_ : Optional[int] = actual_data[division - look_back :]
snake_case_ ,snake_case_ : Any = [], []
snake_case_ ,snake_case_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case_ : Any = np.array(train_x)
snake_case_ : Optional[Any] = np.array(test_x)
snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])
snake_case_ : List[Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case_ : Dict = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
snake_case_ : Optional[Any] = model.predict(x_test)
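# Shape note for the windowing above: each sample holds `look_back`
# consecutive values and its target is the following `forward_days` values,
# which is why the first LSTM takes input_shape=(look_back, 1) and the final
# Dense layer has `forward_days` units.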
| 644
|
'''simple docstring'''
import sys
import turtle
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float] ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
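# The function above computes the midpoint of two points (its two parameters
# were collapsed to a single anonymized name in this dump); e.g. the midpoint
# of (-1_75, -1_25) and (0, 1_75) is (-87.5, 25.0).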
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : int, ) -> None:
my_pen.up()
my_pen.goto(vertexa[0], vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0], vertexa[1] )
my_pen.goto(vertexa[0], vertexa[1] )
my_pen.goto(vertexa[0], vertexa[1] )
if depth == 0:
return
triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
snake_case_ : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
snake_case_ : Tuple = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 644
| 1
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
snake_case_ : List[str] = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
snake_case_ : Optional[Any] = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
snake_case_ : List[str] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
return float((preds == labels).mean() )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = simple_accuracy(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Union[str, Any] = float(fa_score(y_true=SCREAMING_SNAKE_CASE__, y_pred=SCREAMING_SNAKE_CASE__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = float(pearsonr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )[0] )
UpperCAmelCase_ : Any = float(spearmanr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
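# Commented sanity check of the accuracy helper above (the def is anonymized
# here, but the rest of this file calls it `simple_accuracy`):
#   simple_accuracy(np.array([0, 1, 1, 0]), np.array([0, 1, 1, 0])) -> 1.0
#   simple_accuracy(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0])) -> 0.75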
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a (datasets.Metric ):
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] ) -> Any:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__magic_name__ , __magic_name__ )}
elif self.config_name == "stsb":
return pearson_and_spearman(__magic_name__ , __magic_name__ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__magic_name__ , __magic_name__ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__magic_name__ , __magic_name__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 644
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case_ : List[str] = False
class __a (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__magic_name__ )
UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Any = generator.manual_seed(0 )
UpperCAmelCase_ : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077'''
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe.dual_guided(
prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger '''
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe.text_to_image(
prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 644
| 1
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
assert x is not None
assert y is not None
UpperCAmelCase_ : str = len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
# declaring the array for storing the dp values
UpperCAmelCase_ : Optional[int] = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1, m + 1 ):
for j in range(1, n + 1 ):
UpperCAmelCase_ : Union[str, Any] = 1 if x[i - 1] == y[j - 1] else 0
UpperCAmelCase_ : Any = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match )
UpperCAmelCase_ : Tuple = ''''''
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = m, n
while i > 0 and j > 0:
UpperCAmelCase_ : List[str] = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
UpperCAmelCase_ : Dict = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
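# Quick checks of the recurrence above, written against the name the
# __main__ block uses (`longest_common_subsequence`; the def itself is
# anonymized):
#   longest_common_subsequence("abc", "abc") -> (3, 'abc')
#   longest_common_subsequence("abc", "xyz") -> (0, '')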
if __name__ == "__main__":
snake_case_ : Optional[int] = "AGGTAB"
snake_case_ : str = "GXTXAYB"
snake_case_ : Optional[Any] = 4
snake_case_ : Optional[int] = "GTAB"
snake_case_ ,snake_case_ : List[Any] = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 644
|
'''simple docstring'''
snake_case_ : int = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 644
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __a (lowerCamelCase ):
__a : Dict = "naver-clova-ix/donut-base-finetuned-docvqa"
__a : Optional[int] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
__a : int = "document_qa"
__a : Optional[Any] = AutoProcessor
__a : Optional[Any] = VisionEncoderDecoderModel
__a : Any = ["image", "text"]
__a : Union[str, Any] = ["text"]
def __init__( self : int , *__magic_name__ : Union[str, Any] , **__magic_name__ : List[Any] ) -> List[Any]:
"""simple docstring"""
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : "Image" , __magic_name__ : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
UpperCAmelCase_ : Any = task_prompt.replace('''{user_input}''' , __magic_name__ )
UpperCAmelCase_ : int = self.pre_processor.tokenizer(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors='''pt''' ).input_ids
UpperCAmelCase_ : int = self.pre_processor(__magic_name__ , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__magic_name__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__magic_name__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__magic_name__ , ).sequences
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.pre_processor.batch_decode(__magic_name__ )[0]
UpperCAmelCase_ : Dict = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
UpperCAmelCase_ : Optional[int] = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
UpperCAmelCase_ : Dict = re.sub(R'''<.*?>''' , '''''' , __magic_name__ , count=1 ).strip() # remove first task start token
UpperCAmelCase_ : List[str] = self.pre_processor.tokenajson(__magic_name__ )
return sequence["answer"]
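# A minimal usage sketch for the tool above (hypothetical file name and question, not part
# of the original file; assumes Pillow and the pinned checkpoint can be loaded):
#   tool = __a()
#   answer = tool(document=Image.open('''invoice.png''' ) , question='''What is the total?''' )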
| 644
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a (unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet
UpperCAmelCase_ : Dict = KarrasVeScheduler()
UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0]
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = KarrasVeScheduler()
UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 644
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __a (lowerCamelCase ):
__a : Dict = "microsoft/speecht5_tts"
__a : List[str] = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
__a : Dict = "text_reader"
__a : int = SpeechTaProcessor
__a : List[str] = SpeechTaForTextToSpeech
__a : str = SpeechTaHifiGan
__a : int = ["text"]
__a : int = ["audio"]
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
if self.post_processor is None:
UpperCAmelCase_ : List[Any] = '''microsoft/speecht5_hifigan'''
super().setup()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : str=None ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.pre_processor(text=__magic_name__ , return_tensors='''pt''' , truncation=__magic_name__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
UpperCAmelCase_ : List[str] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
UpperCAmelCase_ : Any = torch.tensor(embeddings_dataset[73_05]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[int] ) -> int:
"""simple docstring"""
with torch.no_grad():
return self.model.generate_speech(**__magic_name__ )
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple ) -> int:
"""simple docstring"""
with torch.no_grad():
return self.post_processor(__magic_name__ ).cpu().detach()
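# A minimal usage sketch for the text-to-speech tool above (hypothetical input text, not part
# of the original file; assumes the pinned checkpoints and the datasets package are available):
#   tool = __a()
#   waveform = tool(text='''Hello, world.''' )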
| 644
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (lowerCamelCase ):
__a : List[Any] = "openai/whisper-base"
__a : Optional[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__a : Any = "transcriber"
__a : str = WhisperProcessor
__a : List[Any] = WhisperForConditionalGeneration
__a : int = ["audio"]
__a : Optional[Any] = ["text"]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return self.model.generate(inputs=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
| 644
| 1
|
'''simple docstring'''
snake_case_ : int = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
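# Sketch of how a pin table like this is typically consumed (hypothetical helper, not part of
# the original file; the dict above maps package name to its pinned requirement string):
#   def deps_list(*pkgs):
#       return [snake_case_[pkg] for pkg in pkgs]
#   install_requires = deps_list('''torch''' , '''numpy''' , '''transformers''' )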
| 644
|
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def main() -> None:
    try:
        nums = input('''Enter two integers separated by comma (,): ''').split(''',''')
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            F"""greatest_common_divisor({num_a}, {num_b}) = """
            F"""{greatest_common_divisor(num_a, num_b)}""")
        print(F"""By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}""")
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''')
if __name__ == "__main__":
main()
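# Worked example (sketch, not part of the original file): greatest_common_divisor(12, 18)
# recurses through (12, 18) -> (6, 12) -> (0, 6) and returns abs(6) == 6, while
# gcd_by_iterative(12, 18) reaches the same answer via (12, 18) -> (18, 12) -> (12, 6) -> (6, 0).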
| 644
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
snake_case_ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ : Optional[int] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
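# Worked example (sketch, not part of the original file): with h = w = 768 and scale_factor = 8,
# 768 // 8**2 == 12 with no remainder, so the function returns (12 * 8, 12 * 8) == (96, 96),
# the height/width used for the initial latents later in this pipeline.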
class __a (lowerCamelCase ):
def __init__( self : List[str] , __magic_name__ : MultilingualCLIP , __magic_name__ : XLMRobertaTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, DDPMScheduler] , __magic_name__ : VQModel , ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , movq=__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Dict ) -> List[Any]:
"""simple docstring"""
if latents is None:
UpperCAmelCase_ : List[str] = randn_tensor(__magic_name__ , generator=__magic_name__ , device=__magic_name__ , dtype=__magic_name__ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase_ : Optional[Any] = latents.to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[str] = len(__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else 1
# get prompt text embeddings
UpperCAmelCase_ : List[str] = self.tokenizer(
__magic_name__ , padding='''max_length''' , truncation=__magic_name__ , max_length=77 , return_attention_mask=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors='''pt''' , )
UpperCAmelCase_ : Tuple = text_inputs.input_ids
UpperCAmelCase_ : Optional[int] = self.tokenizer(__magic_name__ , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : Dict = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCAmelCase_ : List[Any] = text_input_ids.to(__magic_name__ )
UpperCAmelCase_ : List[str] = text_inputs.attention_mask.to(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.text_encoder(
input_ids=__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : str = prompt_embeds.repeat_interleave(__magic_name__ , dim=0 )
UpperCAmelCase_ : Union[str, Any] = text_encoder_hidden_states.repeat_interleave(__magic_name__ , dim=0 )
UpperCAmelCase_ : List[Any] = text_mask.repeat_interleave(__magic_name__ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : List[str]
if negative_prompt is None:
UpperCAmelCase_ : Optional[int] = [''''''] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !="""
F""" {type(__magic_name__ )}.""" )
elif isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
UpperCAmelCase_ : List[Any] = negative_prompt
UpperCAmelCase_ : List[str] = self.tokenizer(
__magic_name__ , padding='''max_length''' , max_length=77 , truncation=__magic_name__ , return_attention_mask=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors='''pt''' , )
UpperCAmelCase_ : str = uncond_input.input_ids.to(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = uncond_input.attention_mask.to(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.text_encoder(
input_ids=__magic_name__ , attention_mask=__magic_name__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ : List[str] = negative_prompt_embeds.shape[1]
UpperCAmelCase_ : List[str] = negative_prompt_embeds.repeat(1 , __magic_name__ )
UpperCAmelCase_ : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __magic_name__ )
UpperCAmelCase_ : str = uncond_text_encoder_hidden_states.shape[1]
UpperCAmelCase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , __magic_name__ , 1 )
UpperCAmelCase_ : Any = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , __magic_name__ , -1 )
UpperCAmelCase_ : Dict = uncond_text_mask.repeat_interleave(__magic_name__ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCAmelCase_ : Dict = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCAmelCase_ : Optional[Any] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Union[str, Any]=0 ) -> List[str]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase_ : List[Any] = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase_ : Any = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__magic_name__ , __magic_name__ )
def UpperCAmelCase__ ( self : str , __magic_name__ : int=0 ) -> Optional[Any]:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
UpperCAmelCase_ : Tuple = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__magic_name__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(__magic_name__ , __magic_name__ , prev_module_hook=__magic_name__ )
if self.safety_checker is not None:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(self.safety_checker , __magic_name__ , prev_module_hook=__magic_name__ )
# We'll offload the last model manually.
UpperCAmelCase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self : int ) -> int:
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__magic_name__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__magic_name__ )
def __call__( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , __magic_name__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 1_00 , __magic_name__ : float = 4.0 , __magic_name__ : int = 1 , __magic_name__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , ) -> Dict:
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : Optional[int] = 1
elif isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : Dict = len(__magic_name__ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}""" )
UpperCAmelCase_ : List[Any] = self._execution_device
UpperCAmelCase_ : Any = batch_size * num_images_per_prompt
UpperCAmelCase_ : int = guidance_scale > 1.0
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._encode_prompt(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : str = torch.cat(__magic_name__ , dim=0 )
if isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : List[str] = torch.cat(__magic_name__ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = image_embeds.repeat_interleave(__magic_name__ , dim=0 )
UpperCAmelCase_ : List[Any] = negative_image_embeds.repeat_interleave(__magic_name__ , dim=0 )
UpperCAmelCase_ : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=__magic_name__ )
self.scheduler.set_timesteps(__magic_name__ , device=__magic_name__ )
UpperCAmelCase_ : Optional[int] = self.scheduler.timesteps
UpperCAmelCase_ : List[Any] = self.unet.config.in_channels
UpperCAmelCase_ , UpperCAmelCase_ : Any = get_new_h_w(__magic_name__ , __magic_name__ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , __magic_name__ , __magic_name__ , __magic_name__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : List[Any] = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
UpperCAmelCase_ : List[str] = self.unet(
sample=__magic_name__ , timestep=__magic_name__ , encoder_hidden_states=__magic_name__ , added_cond_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = variance_pred.chunk(2 )
UpperCAmelCase_ : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
__magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ , ).prev_sample
# post-processing
UpperCAmelCase_ : Any = self.movq.decode(__magic_name__ , force_not_quantize=__magic_name__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : Tuple = image * 0.5 + 0.5
UpperCAmelCase_ : Optional[int] = image.clamp(0 , 1 )
UpperCAmelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__magic_name__ )
| 644
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : List[str] = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = range_bbox
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : List[str] = bbox[i, j, 3]
UpperCAmelCase_ : Dict = bbox[i, j, 1]
UpperCAmelCase_ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : List[str] = bbox[i, j, 2]
UpperCAmelCase_ : Tuple = bbox[i, j, 0]
UpperCAmelCase_ : Union[str, Any] = t
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : Any = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Union[str, Any] = False
__a : int = False
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str:
"""simple docstring"""
return True
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = LiltModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : Tuple = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@slow
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ )
UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ )
UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ )
UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] )
UpperCAmelCase_ : List[str] = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , )
self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
| 644
| 1
|
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute which of the two weight vectors is closest to the sample."""
        d_a = 0.0
        d_b = 0.0
        for i in range(len(sample)):
            d_a += math.pow((sample[i] - weights[0][i]), 2)
            d_b += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d_a > d_b else 1
    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull the winning weight vector j towards the sample by learning rate alpha."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""")
    print(F"""Weights that have been trained : {weights}""")
# running the main() function
if __name__ == "__main__":
main()
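# Worked check of the first winner computation (sketch, not part of the original file):
# for sample [1, 1, 0, 0] and the initial weights above,
#   d_a = 0.8**2 + 0.4**2 + 0.5**2 + 0.9**2 = 1.86
#   d_b = 0.2**2 + 0.6**2 + 0.7**2 + 0.3**2 = 0.98
# so d_a > d_b, cluster 1 wins, and weights[1] is pulled towards the sample.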
| 644
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = "▁"
snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
snake_case_ : int = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
snake_case_ : Optional[Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
snake_case_ : Dict = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
snake_case_ : Any = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (lowerCamelCase ):
__a : List[str] = ["input_ids"]
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = do_lower_case
UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt
UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ )
else:
UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any:
"""simple docstring"""
if text is None:
return None
UpperCAmelCase_ : str = self.tokenize(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', []
for i, ch in enumerate(__magic_name__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ )
if self.is_whitespace(__magic_name__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__magic_name__ ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase_ : Tuple = token[1:]
UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset
UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase_ : int = end
return token_mapping
@property
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : Optional[Any] = None
return state
def __setstate__( self : str , __magic_name__ : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCAmelCase_ : Dict = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ )
else:
UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = []
for pi, piece in enumerate(__magic_name__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0:
new_pieces.append(__magic_name__ )
continue
else:
continue
UpperCAmelCase_ : List[str] = 0
for i, chunk in enumerate(__magic_name__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__magic_name__ )
UpperCAmelCase_ : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : str = i
if len(__magic_name__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(__magic_name__ , self.unk_token )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__magic_name__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3)
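        # Worked example (sketch, not in the original): for a pair with len(A) == 2 and len(B) == 3
        # this returns [0] * 3 + [1] * 6 == [0, 0, 0, 1, 1, 1, 1, 1, 1],
        # matching the layout [CLS] A A [SEP] [SEP] B B B [SEP].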
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__magic_name__ ) == 1:
UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ )
if cat == "Zs":
return True
return False
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {}
with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__magic_name__ ):
UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' )
UpperCAmelCase_ : Dict = int(__magic_name__ )
return token_to_idx
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0
if os.path.isdir(__magic_name__ ):
UpperCAmelCase_ : Any = os.path.join(
__magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCAmelCase_ : Dict = token_index
writer.write(token + '''\n''' )
index += 1
UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' )
with open(__magic_name__ , '''wb''' ) as fi:
UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (vocab_file,)
| 644
| 1
|
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation
    # print table header
    print('''Symbol'''.center(8), '''Action'''.center(12), '''Stack''', sep=''' | ''')
    print('''-''' * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('''push(''' + x + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8), ('''pop(''' + b + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8), ('''pop(''' + a + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ('''push(''' + a + x + b + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 644
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError('''input must be a negative integer''')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ : int = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : str = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
snake_case_ : Optional[int] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a (datasets.Metric ):
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install \"sacrebleu>=1.4.12\"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : int = False , __magic_name__ : Tuple = False , __magic_name__ : Optional[Any] = False , __magic_name__ : List[Any] = False , ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        UpperCAmelCase_ : Optional[int] = [[refs[i] for refs in references] for i in range(references_per_prediction )]
UpperCAmelCase_ : List[str] = TER(
normalized=A__ , no_punct=A__ , asian_support=A__ , case_sensitive=A__ , )
UpperCAmelCase_ : Optional[int] = sb_ter.corpus_score(A__ , A__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 700
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(__magic_name__ ) # fails here
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 )
UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 )
UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
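Beyond these unit tests, the constraint is typically consumed by constrained beam search. A hedged usage sketch (the model name and forced words are illustrative only; `generate` must run with `num_beams > 1` for constraints to apply):

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# force the continuation to contain either " dog" or " puppy"
alternatives = [tok(" dog", add_special_tokens=False).input_ids,
                tok(" puppy", add_special_tokens=False).input_ids]
constraint = DisjunctiveConstraint(alternatives)

inputs = tok("My favorite animal is", return_tensors="pt")
out = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=20)
print(tok.decode(out[0], skip_special_tokens=True))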
| 644
| 0
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
if len(__A ) != 32:
raise ValueError('''Input must be of length 32''' )
UpperCAmelCase_ : Any = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]:
if i < 0:
raise ValueError('''Input must be non-negative''' )
UpperCAmelCase_ : str = format(__A, '''08x''' )[-8:]
UpperCAmelCase_ : List[str] = b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> int:
UpperCAmelCase_ : List[str] = b''''''
for char in message:
bit_string += format(__A, '''08b''' ).encode('''utf-8''' )
UpperCAmelCase_ : List[str] = format(len(__A ), '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__A ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
if len(__A ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0, len(__A ), 512 ):
UpperCAmelCase_ : Optional[int] = bit_string[pos : pos + 512]
UpperCAmelCase_ : str = []
for i in range(0, 512, 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) )
yield block_words
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
if i < 0:
raise ValueError('''Input must be non-negative''' )
UpperCAmelCase_ : Optional[Any] = format(__A, '''032b''' )
UpperCAmelCase_ : List[str] = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__A, 2 )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Dict ) -> Any:
return (a + b) % 2**32
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Dict ) -> Any:
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = preprocess(__A )
UpperCAmelCase_ : int = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
UpperCAmelCase_ : int = 0x67_452_301
UpperCAmelCase_ : Any = 0xEF_CDA_B89
UpperCAmelCase_ : Optional[Any] = 0x98_BAD_CFE
UpperCAmelCase_ : int = 0x10_325_476
    UpperCAmelCase_ : int = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__A ):
UpperCAmelCase_ : List[str] = aa
UpperCAmelCase_ : Union[str, Any] = ba
UpperCAmelCase_ : List[str] = ca
UpperCAmelCase_ : Optional[Any] = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
UpperCAmelCase_ : int = d ^ (b & (c ^ d))
UpperCAmelCase_ : Optional[int] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
UpperCAmelCase_ : Tuple = c ^ (d & (b ^ c))
UpperCAmelCase_ : Union[str, Any] = (5 * i + 1) % 16
elif i <= 47:
UpperCAmelCase_ : Tuple = b ^ c ^ d
UpperCAmelCase_ : Union[str, Any] = (3 * i + 5) % 16
else:
UpperCAmelCase_ : Any = c ^ (b | not_aa(__A ))
UpperCAmelCase_ : Tuple = (7 * i) % 16
UpperCAmelCase_ : Tuple = (f + a + added_consts[i] + block_words[g]) % 2**32
UpperCAmelCase_ : Optional[int] = d
UpperCAmelCase_ : int = c
UpperCAmelCase_ : Any = b
UpperCAmelCase_ : List[Any] = sum_aa(__A, left_rotate_aa(__A, shift_amounts[i] ) )
# Add hashed chunk to running total
UpperCAmelCase_ : int = sum_aa(__A, __A )
UpperCAmelCase_ : int = sum_aa(__A, __A )
UpperCAmelCase_ : List[Any] = sum_aa(__A, __A )
UpperCAmelCase_ : Optional[int] = sum_aa(__A, __A )
UpperCAmelCase_ : Any = reformat_hex(__A ) + reformat_hex(__A ) + reformat_hex(__A ) + reformat_hex(__A )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
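A quick cross-check against the standard library. The final digest function's original name is anonymized above; `md5_me` is assumed here from the upstream reference, and it returns the hex digest as bytes:

import hashlib

msg = b"The quick brown fox jumps over the lazy dog"
assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")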
| 701
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
snake_case_ : Optional[Any] = df.shape[:1][0]
# If you're using some other dataset input the target column
snake_case_ : Any = df.iloc[:, 1:2]
snake_case_ : str = actual_data.values.reshape(len_data, 1)
snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
snake_case_ : List[str] = 10
snake_case_ : Any = 5
snake_case_ : Any = 20
snake_case_ : Tuple = len_data - periods * look_back
snake_case_ : str = actual_data[:division]
snake_case_ : Optional[int] = actual_data[division - look_back :]
snake_case_ ,snake_case_ : Any = [], []
snake_case_ ,snake_case_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case_ : Any = np.array(train_x)
snake_case_ : Optional[Any] = np.array(test_x)
snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])
snake_case_ : List[Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # input shape is inferred from the previous layer
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case_ : Dict = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
snake_case_ : Optional[Any] = model.predict(x_test)
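Note that the script scales prices into [0, 1] but discards the fitted scaler, so `model.predict` returns scaled values. A sketch of the missing inverse step (variable names are taken from the script's own usage; the scaler must be kept when fitting):

from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(actual_data)  # keep the scaler instead of discarding it

preds_scaled = model.predict(x_test)             # shape: (n_windows, forward_days)
preds = scaler.inverse_transform(preds_scaled.reshape(-1, 1)).reshape(preds_scaled.shape)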
| 644
| 0
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_SCREAMING_SNAKE_CASE ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> bool:
# Base Case
if index == len(_SCREAMING_SNAKE_CASE ):
return True
# Recursive Step
for i in range(_SCREAMING_SNAKE_CASE ):
if valid_coloring(graph[index], _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ):
# Color current vertex
UpperCAmelCase_ : List[str] = i
# Validate coloring
if util_color(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, index + 1 ):
return True
# Backtrack
UpperCAmelCase_ : Optional[int] = -1
return False
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : List[Any] ) -> list[int]:
UpperCAmelCase_ : Union[str, Any] = [-1] * len(_SCREAMING_SNAKE_CASE )
if util_color(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, 0 ):
return colored_vertices
return []
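Usage sketch, assuming the public entry point above keeps its upstream name `color(graph, max_colors)` (the local names are anonymized): a triangle graph is not 2-colorable but is 3-colorable.

triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
print(color(triangle, 2))  # [] -- no valid 2-coloring exists
print(color(triangle, 3))  # e.g. [0, 1, 2]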
| 702
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
snake_case_ : Dict = "CompVis/stable-diffusion-v1-2"
snake_case_ : Any = "CompVis/stable-diffusion-v1-3"
snake_case_ : str = "CompVis/stable-diffusion-v1-4"
class __a (lowerCamelCase ):
def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
"""simple docstring"""
        super().__init__()
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__magic_name__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ : int = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 644
| 0
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
snake_case_ : Optional[Any] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
snake_case_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
snake_case_ : List[str] = BeautifulSoup(res.text, "html.parser")
snake_case_ : List[str] = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'''https://google.com{link.get('href')}''')
| 703
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
snake_case_ : Optional[int] = 16
snake_case_ : Tuple = 32
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Tuple = datasets.map(
SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE__ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : str = DataLoader(
tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any:
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
# Initialize accelerator
UpperCAmelCase_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : int = config['''lr''']
UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] )
UpperCAmelCase_ : Optional[int] = int(config['''seed'''] )
UpperCAmelCase_ : List[str] = int(config['''batch_size'''] )
UpperCAmelCase_ : Optional[int] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ )
# Instantiate optimizer
UpperCAmelCase_ : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, )
else:
UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' )
UpperCAmelCase_ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCAmelCase_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1
UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''', optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : int = {}
for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = outputs.loss
UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : Tuple = F"""epoch_{epoch}"""
UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = accuracy
UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr''']
UpperCAmelCase_ : Tuple = epoch
UpperCAmelCase_ : Dict = overall_step
accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( ) -> List[str]:
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, )
parser.add_argument(
'''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', )
parser.add_argument(
'''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', )
parser.add_argument(
'''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', )
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
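The script is designed for `accelerate launch`, and the resume logic above parses the epoch number out of the checkpoint folder name (it must contain `epoch_<n>`). A minimal, self-contained sketch of the save/resume contract it relies on (paths are illustrative):

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("ckpts/epoch_0")  # writes model/optimizer/RNG state
accelerator.load_state("ckpts/epoch_0")  # what --resume_from_checkpoint triggers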
| 644
| 0
|
from __future__ import annotations
import math
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : List[str] ) -> float:
UpperCAmelCase_ : str = u
for i in range(1, _lowercase ):
UpperCAmelCase_ : str = temp * (u - i)
return temp
def lowerCamelCase_ ( ) -> None:
UpperCAmelCase_ : Any = int(input('''enter the numbers of values: ''' ) )
UpperCAmelCase_ : list[list[float]] = []
for _ in range(_lowercase ):
y.append([] )
for i in range(_lowercase ):
for j in range(_lowercase ):
y[i].append(_lowercase )
UpperCAmelCase_ : List[str] = 0
print('''enter the values of parameters in a list: ''' )
UpperCAmelCase_ : Dict = list(map(_lowercase, input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(_lowercase ):
UpperCAmelCase_ : Union[str, Any] = float(input() )
UpperCAmelCase_ : str = int(input('''enter the value to interpolate: ''' ) )
UpperCAmelCase_ : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1, _lowercase ):
for j in range(n - i ):
UpperCAmelCase_ : Tuple = y[j + 1][i - 1] - y[j][i - 1]
UpperCAmelCase_ : List[str] = y[0][0]
for i in range(1, _lowercase ):
summ += (ucal(_lowercase, _lowercase ) * y[0][i]) / math.factorial(_lowercase )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
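A non-interactive sketch of the same forward-difference scheme on fixed data, so the arithmetic can be checked without stdin (sin(x) sampled at 5-degree steps; the interpolated value at 52 degrees should be about 0.788):

import math

x = [45.0, 50.0, 55.0, 60.0]
diffs = [[0.7071, 0.7660, 0.8192, 0.8660]]  # sin(x) samples; row i holds i-th differences
n = len(x)
for i in range(1, n):
    diffs.append([diffs[i - 1][j + 1] - diffs[i - 1][j] for j in range(n - i)])

value = 52.0
u = (value - x[0]) / (x[1] - x[0])
summ, temp = diffs[0][0], 1.0
for i in range(1, n):
    temp *= u - (i - 1)
    summ += temp * diffs[i][0] / math.factorial(i)
print(summ)  # ~0.78800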
| 704
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]:
UpperCAmelCase_ : int = []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [nums.copy()]
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : List[Any] = nums.pop(0 )
UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ )
for perm in permutations:
perm.append(SCREAMING_SNAKE_CASE__ )
result.extend(SCREAMING_SNAKE_CASE__ )
nums.append(SCREAMING_SNAKE_CASE__ )
return result
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
output.append(nums[:] )
else:
for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start]
backtrack(start + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack
UpperCAmelCase_ : Optional[int] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
snake_case_ : Tuple = permutea([1, 2, 3])
print(res)
doctest.testmod()
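Cross-check sketch: both routines should produce the same multiset of permutations as `itertools.permutations` (ordering may differ); `permutea` is the anonymized name the driver above already uses.

from itertools import permutations as it_perms

nums = [1, 2, 3]
expected = sorted(it_perms(nums))
assert sorted(tuple(p) for p in permutea(nums)) == expected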
| 644
| 0
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __a :
def __init__( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Tuple=13 , __magic_name__ : int=7 , __magic_name__ : Any=False , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]=False , __magic_name__ : Tuple=True , __magic_name__ : List[Any]=33 , __magic_name__ : Any=32 , __magic_name__ : List[Any]=5 , __magic_name__ : Any=4 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Optional[int]=5_12 , __magic_name__ : Tuple=16 , __magic_name__ : str=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[str]=None , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Tuple = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : Union[str, Any] = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : int = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Any = num_labels
UpperCAmelCase_ : int = num_choices
UpperCAmelCase_ : int = scope
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Dict = None
if self.use_input_mask:
UpperCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = EsmModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCAmelCase_ : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = model(__lowerCamelCase )
UpperCAmelCase_ : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = EsmForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = EsmForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCAmelCase_ : List[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __a (lowercase__ , lowercase__ , unittest.TestCase ):
__a : Union[str, Any] = False
__a : Optional[Any] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__a : List[Any] = ()
__a : List[str] = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Dict = True
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = EsmModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : List[Any] = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = EsmModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()[0]
UpperCAmelCase_ : List[str] = EsmEmbeddings(config=__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
UpperCAmelCase_ : Dict = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
UpperCAmelCase_ : Any = create_position_ids_from_input_ids(__lowerCamelCase , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCamelCase , __lowerCamelCase ) ) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()[0]
UpperCAmelCase_ : Any = EsmEmbeddings(config=__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = torch.empty(2 , 4 , 30 )
UpperCAmelCase_ : Union[str, Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
UpperCAmelCase_ : Optional[int] = torch.as_tensor([expected_single_positions, expected_single_positions] )
UpperCAmelCase_ : Union[str, Any] = embeddings.create_position_ids_from_inputs_embeds(__lowerCamelCase )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCamelCase , __lowerCamelCase ) ) )
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
pass
@require_torch
class __a (lowercase__ ):
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
UpperCAmelCase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase_ : int = model(__lowerCamelCase )[0]
UpperCAmelCase_ : Dict = 33
UpperCAmelCase_ : int = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __lowerCamelCase )
UpperCAmelCase_ : Any = torch.tensor(
[[[8.9_2_1_5, -10.58_98, -6.4_6_7_1], [-6.3_9_6_7, -13.91_14, -1.1_2_1_2], [-7.7_8_1_2, -13.95_16, -3.7_4_0_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
UpperCAmelCase_ : int = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
UpperCAmelCase_ : Optional[int] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase_ : Optional[int] = model(__lowerCamelCase )[0]
# compare the actual values for a slice.
UpperCAmelCase_ : Optional[Any] = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
| 705
|
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
right -= 1 # Because of right is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
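Usage sketch, assuming the class above keeps its upstream name `MaxFenwickTree` with `update(index, value)` and `query(left, right)` (right bound exclusive, as the `right -= 1` above indicates):

ft = MaxFenwickTree(5)
ft.update(0, 4)
ft.update(1, 7)
ft.update(3, 5)
print(ft.query(0, 2))  # 7 -- max over indices 0..1
print(ft.query(2, 5))  # 5 -- max over indices 2..4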
| 644
| 0
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Dict = (32, 32)
UpperCAmelCase_ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase_ )
return image
@property
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def UpperCAmelCase__ ( self : Any ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(UpperCAmelCase_ )
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
def extract(*__magic_name__ : List[Any] , **__magic_name__ : Dict ):
class __a :
def __init__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = torch.ones([0] )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> List[Any]:
"""simple docstring"""
self.pixel_values.to(UpperCAmelCase_ )
return self
return Out()
return extract
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Any = self.dummy_cond_unet
UpperCAmelCase_ : Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCAmelCase_ : List[str] = self.dummy_vae
UpperCAmelCase_ : str = self.dummy_text_encoder
UpperCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ : Optional[Any] = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCAmelCase_ : Dict = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
UpperCAmelCase_ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : List[Any] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCAmelCase_ , )[0]
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : List[Any] = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict

@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)

        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowerCamelCase : Any = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
lowerCamelCase : Union[str, Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
lowerCamelCase : Union[str, Any] = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
lowerCamelCase : Any = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
lowerCamelCase : Union[str, Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular per-candidate results."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results

def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
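
# Illustrative sanity check for the estimator above (added for clarity; not part of
# the original metric file). With n = 2 candidates per problem and c = 1 passing, the
# unbiased estimate is pass@1 = 1 - C(n-c, 1)/C(n, 1) = 1 - 1/2 = 0.5, matching the
# docstring example where one of two candidates solves the task:
#
#     >>> estimate_pass_at_k(np.array([2]), np.array([1]), 1)
#     array([0.5])
#     >>> estimate_pass_at_k(np.array([2]), np.array([1]), 2)
#     array([1.])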
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
snake_case_ : str = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
snake_case_ : List[Any] = {
"Salesforce/codegen-350M-mono": 20_48,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
            bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        # keep only the first top-level print statement / function definition
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
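
# Illustrative use of `truncate_before_pattern` (hypothetical snippet, added for
# clarity; not part of the original file). The patterns are regexes compiled with
# re.MULTILINE, and the decoded completion is cut at the earliest match, e.g. at a
# blank-line comment, a top-level docstring, or a triple newline:
#
#     tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tokenizer("def hello_world():", return_tensors="np").input_ids[0]
#     text = tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])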
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
'''simple docstring'''
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
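
# Example invocation (script name assumed): `python get_modified_files.py utils src tests`
# prints, space-separated, the modified .py files under the given top-level dirs since
# the fork point of the current branch from main.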
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
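
# Minimal usage sketch for the processor above (assumed input shapes; the class name
# follows the rename applied in this file and is not confirmed by the original dump):
# a 65x97 RGB image is rounded down to 64x96 with size_divisor=32, rescaled to [0, 1],
# and returned channels-first.
#
#     import numpy as np
#     processor = GLPNImageProcessor(size_divisor=32)
#     dummy = (np.random.rand(65, 97, 3) * 255).astype(np.uint8)
#     batch = processor.preprocess(dummy, return_tensors="np")
#     assert batch["pixel_values"][0].shape == (3, 64, 96)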
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance

@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 710
|
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the positive n-digit integers that are also an nth power (Project Euler 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
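    # A quick sanity check, added here for illustration (not part of the original
    # solution): 16807 = 7**5 has exactly five digits, so it is one of the numbers
    # the generator expression above counts.
    assert len(str(7**5)) == 5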
| 644
| 0
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file: str, do_lower_case: bool = True, remove_space: bool = True, keep_accents: bool = False, bos_token: str = "[CLS]", eos_token: str = "[SEP]", unk_token: str = "<unk>", sep_token: str = "[SEP]", pad_token: str = "<pad>", cls_token: str = "[CLS]", mask_token: str = "[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict[str, Any]:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict[str, Any]) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs: str) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with the SentencePiece model."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
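
# A minimal usage sketch, added for illustration; it assumes a SentencePiece model
# file "spiece.model" exists locally (the path here is hypothetical):
#
#     tokenizer = AlbertTokenizer(vocab_file="spiece.model")
#     pieces = tokenizer._tokenize("The dog is cute")
#     ids = tokenizer.build_inputs_with_special_tokens(
#         [tokenizer._convert_token_to_id(p) for p in pieces]
#     )  # [CLS] ... [SEP] wrapped around the encoded pieces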
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
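
# A usage sketch, for illustration only (assumes PIL is installed and the checkpoint
# can be downloaded; the image path is hypothetical):
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(Image.open("photo.jpg"), "What color is the car?")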
| 644
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if we use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 712
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        """Return a string of all the Nodes using in-order traversal."""
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        """Insert a new node in the Binary Search Tree with value label."""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)
    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        """We go deep on the right branch."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """We go deep on the left branch."""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                predecessor = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(predecessor.value)  # type: ignore
                node.value = (
                    predecessor.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """Traverse the tree; a custom traversal function can be passed by client code."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """Perform an in-order traversal and append the node values to arr."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the kth smallest element in a binary search tree."""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    """postOrder (left, right, self)"""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
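    # An illustrative run (added; not part of the original module): build the demo
    # tree and query the 3rd smallest key.
    #
    #     t = BinarySearchTree()
    #     t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
    #     t.find_kth_smallest(3, t.root)  # -> 4  (sorted keys: 1, 3, 4, 6, ...)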
| 644
| 0
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs

        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
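
# An illustrative note (added; not part of the original tests): with the dummy dataset
# above, a query embedding of all ones has inner product 8 with doc "foo" and 16 with
# doc "bar", while the all-minus-ones query scores -8 and -16, which is why
# `doc_ids.tolist()` comes back as [[1], [0]] for the (+ones, -ones) query batch
# used throughout these tests.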
| 713
|
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
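    # Illustrative example (added; not in the original script): the midpoint helper
    # is plain coordinate averaging, e.g. get_mid((0, 0), (2, 2)) == (1.0, 1.0),
    # so each recursion level splits a triangle into three half-scale copies.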
| 644
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
snake_case_ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> Any:
config.addinivalue_line('''markers''', '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowerCAmelCase__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
UpperCAmelCase_ : List[Any] = tmp_path_factory.getbasetemp() / '''cache'''
UpperCAmelCase_ : Union[str, Any] = test_hf_cache_home / '''datasets'''
UpperCAmelCase_ : List[str] = test_hf_cache_home / '''metrics'''
UpperCAmelCase_ : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''', str(lowerCAmelCase__ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''', str(lowerCAmelCase__ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''', str(lowerCAmelCase__ ) )
UpperCAmelCase_ : Optional[int] = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''', str(lowerCAmelCase__ ) )
UpperCAmelCase_ : Tuple = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(lowerCAmelCase__ ) )
@pytest.fixture(autouse=lowerCAmelCase__, scope='''session''' )
def lowerCamelCase_ ( ) -> List[Any]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowerCAmelCase__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
# don't take tests into account when counting downloads
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''', lowerCAmelCase__ )
@pytest.fixture
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''', lowerCAmelCase__ )
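
# Illustrative note (added): with pytest_collection_modifyitems above, a test that
# carries no explicit marker, e.g.
#
#     def test_addition():
#         assert 1 + 1 == 2
#
# is collected as if it were decorated with @pytest.mark.unit, so it can be
# selected with `pytest -m unit`.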
| 714
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case_ : List[str] = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 644
| 0
|
'''simple docstring'''
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(x) with its Maclaurin series: x - x^3/3! + x^5/5! - ..."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
| 715
|
'''simple docstring'''
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
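
# A small usage sketch, added for illustration (the helper name is hypothetical):
# turn the table above into pip-style requirement strings.
def format_requirements(names):
    return [deps[name] for name in names]

# e.g. format_requirements(["torch", "transformers"]) -> ["torch>=1.4", "transformers>=4.25.1"]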
| 644
| 0
|
'''simple docstring'''
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Perform a topological sort on a directed acyclic graph (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
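# For the adjacency list above this prints [0, 1, 2, 3, 4, 5]: vertex 0 is the only
# source, removing 1 and 2 unlocks 3, and removing 3 unlocks 4 and 5.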
| 716
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self) -> UNet2DModel:
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self) -> None:
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = KarrasVeScheduler()
UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 644
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
UpperCAmelCase_ : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ : List[str] = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1E-3 ) )
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : str = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
UpperCAmelCase_ : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ : List[Any] = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1E-3 ) )
| 717
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (lowerCamelCase ):
__a : List[Any] = "openai/whisper-base"
__a : Optional[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__a : Any = "transcriber"
__a : str = WhisperProcessor
__a : List[Any] = WhisperForConditionalGeneration
__a : int = ["audio"]
__a : Optional[Any] = ["text"]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return self.model.generate(inputs=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
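# Added sketch -- the unmangled names are an assumption, following the usual
# PipelineTool encode/forward/decode convention for the three methods above:
#     features = tool.encode(audio)      # raw audio -> log-mel input_features
#     tokens   = tool.forward(features)  # WhisperForConditionalGeneration.generate
#     text     = tool.decode(tokens)     # batch_decode -> transcribed string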
| 644
| 0
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Namespace ) -> Tuple:
return ConvertCommand(
args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name )
snake_case_ : int = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class __a (_UpperCAmelCase ):
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : ArgumentParser ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = parser.add_parser(
            '''convert''' , help='''CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=lowerCamelCase_ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=lowerCamelCase_ )
def __init__( self : List[Any] , __magic_name__ : str , __magic_name__ : str , __magic_name__ : str , __magic_name__ : str , __magic_name__ : str , *__magic_name__ : Dict , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F"""Loading model {model_type}""" )
UpperCAmelCase_ : Optional[Any] = model_type
UpperCAmelCase_ : List[Any] = tf_checkpoint
UpperCAmelCase_ : List[str] = pytorch_dump_output
UpperCAmelCase_ : Optional[int] = config
UpperCAmelCase_ : str = finetuning_task_name
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase_ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase_ : Dict = self._tf_checkpoint
UpperCAmelCase_ : Tuple = ''''''
else:
UpperCAmelCase_ : List[Any] = self._tf_checkpoint
UpperCAmelCase_ : Optional[int] = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCamelCase_ , self._config , self._pytorch_dump_output , lowerCamelCase_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]''' )
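# Added usage sketch (paths are placeholders, not from the original):
#     transformers-cli convert --model_type bert \
#         --tf_checkpoint ./bert_model.ckpt \
#         --config ./bert_config.json \
#         --pytorch_dump_output ./converted
# Each flag corresponds one-to-one to an argument registered on the
# sub-parser above.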
| 718
|
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(0, b) = |b|."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 the loop terminates and x is returned as the final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input('''Enter two integers separated by comma (,): ''').split(''',''')
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            F"""greatest_common_divisor({num_1}, {num_2}) = """
            F"""{greatest_common_divisor(num_1, num_2)}""" )
        print(F"""By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}""" )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''')
if __name__ == "__main__":
main()
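# Added sanity checks (illustrative expected values):
#     greatest_common_divisor(24, 40) -> 8
#     gcd_by_iterative(24, 40)        -> 8
#     greatest_common_divisor(0, 7)   -> 7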
| 644
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : Dict = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class __a (_UpperCAmelCase ):
__a : str = 'markuplm'
def __init__( self : Dict , __magic_name__ : List[str]=3_05_22 , __magic_name__ : Tuple=7_68 , __magic_name__ : int=12 , __magic_name__ : Any=12 , __magic_name__ : Optional[Any]=30_72 , __magic_name__ : str="gelu" , __magic_name__ : Dict=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Optional[Any]=5_12 , __magic_name__ : str=2 , __magic_name__ : List[str]=0.0_2 , __magic_name__ : Optional[int]=1E-12 , __magic_name__ : Optional[Any]=0 , __magic_name__ : Dict=0 , __magic_name__ : Dict=2 , __magic_name__ : int=2_56 , __magic_name__ : Union[str, Any]=10_24 , __magic_name__ : Optional[int]=2_16 , __magic_name__ : List[Any]=10_01 , __magic_name__ : Optional[int]=32 , __magic_name__ : List[str]=50 , __magic_name__ : List[Any]="absolute" , __magic_name__ : Optional[Any]=True , __magic_name__ : int=None , **__magic_name__ : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Dict = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : Dict = position_embedding_type
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : int = classifier_dropout
# additional properties
UpperCAmelCase_ : List[Any] = max_depth
UpperCAmelCase_ : List[Any] = max_xpath_tag_unit_embeddings
UpperCAmelCase_ : Optional[Any] = max_xpath_subs_unit_embeddings
UpperCAmelCase_ : Dict = tag_pad_id
UpperCAmelCase_ : Any = subs_pad_id
UpperCAmelCase_ : Optional[Any] = xpath_unit_hidden_size
| 719
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : List[str] = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = range_bbox
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : List[str] = bbox[i, j, 3]
UpperCAmelCase_ : Dict = bbox[i, j, 1]
UpperCAmelCase_ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : List[str] = bbox[i, j, 2]
UpperCAmelCase_ : Tuple = bbox[i, j, 0]
UpperCAmelCase_ : Union[str, Any] = t
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : Tuple = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : Any = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Union[str, Any] = False
__a : int = False
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str:
"""simple docstring"""
return True
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = LiltModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : Tuple = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@slow
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ )
UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ )
UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ )
UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] )
UpperCAmelCase_ : List[str] = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , )
        self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
| 644
| 0
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, t_max: int = 10) -> int:
    """Count tile totals t <= t_limit that can form between 1 and t_max hollow square laminae."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # smallest hole that keeps the tile count within t_limit
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must be centred, so outer and hole widths share parity
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= t_max)
if __name__ == "__main__":
print(f'''{solution() = }''')
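# Added worked example: the smallest lamina is a 3x3 square with a 1x1 hole,
# using 3*3 - 1*1 = 8 tiles, so count[8] gains one entry; the parity fix-up in
# solution() keeps outer_width and hole_width both odd or both even so the
# hole sits centred inside the square.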
| 720
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = "▁"
snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
snake_case_ : int = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
snake_case_ : Optional[Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
snake_case_ : Dict = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
snake_case_ : Any = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (lowerCamelCase ):
__a : List[str] = ["input_ids"]
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None:
"""simple docstring"""
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = do_lower_case
UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt
UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ )
else:
UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any:
"""simple docstring"""
if text is None:
return None
UpperCAmelCase_ : str = self.tokenize(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', []
for i, ch in enumerate(__magic_name__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ )
if self.is_whitespace(__magic_name__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__magic_name__ ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase_ : Tuple = token[1:]
UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset
UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase_ : int = end
return token_mapping
@property
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : Optional[Any] = None
return state
def __setstate__( self : str , __magic_name__ : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCAmelCase_ : Dict = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ )
else:
UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = []
for pi, piece in enumerate(__magic_name__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0:
new_pieces.append(__magic_name__ )
continue
else:
continue
UpperCAmelCase_ : List[str] = 0
for i, chunk in enumerate(__magic_name__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__magic_name__ )
UpperCAmelCase_ : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : str = i
if len(__magic_name__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(__magic_name__ , self.unk_token )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__magic_name__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3)
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__magic_name__ ) == 1:
UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ )
if cat == "Zs":
return True
return False
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {}
with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__magic_name__ ):
UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' )
UpperCAmelCase_ : Dict = int(__magic_name__ )
return token_to_idx
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0
if os.path.isdir(__magic_name__ ):
UpperCAmelCase_ : Any = os.path.join(
__magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCAmelCase_ : Dict = token_index
writer.write(token + '''\n''' )
index += 1
UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' )
with open(__magic_name__ , '''wb''' ) as fi:
UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (vocab_file,)
| 644
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
snake_case_ : List[Any] = logging.get_logger(__name__)
class __a (SCREAMING_SNAKE_CASE__ ):
__a : int = "upernet"
def __init__( self : Union[str, Any] , __magic_name__ : List[Any]=None , __magic_name__ : Optional[Any]=5_12 , __magic_name__ : List[str]=0.0_2 , __magic_name__ : Any=[1, 2, 3, 6] , __magic_name__ : Tuple=True , __magic_name__ : Any=0.4 , __magic_name__ : Optional[Any]=3_84 , __magic_name__ : int=2_56 , __magic_name__ : List[str]=1 , __magic_name__ : Any=False , __magic_name__ : str=2_55 , **__magic_name__ : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(**_lowercase )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCAmelCase_ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : List[Any] = backbone_config.get('''model_type''' )
UpperCAmelCase_ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Any = config_class.from_dict(_lowercase )
UpperCAmelCase_ : str = backbone_config
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Union[str, Any] = pool_scales
UpperCAmelCase_ : Optional[Any] = use_auxiliary_head
UpperCAmelCase_ : Any = auxiliary_loss_weight
UpperCAmelCase_ : List[Any] = auxiliary_in_channels
UpperCAmelCase_ : str = auxiliary_channels
UpperCAmelCase_ : Dict = auxiliary_num_convs
UpperCAmelCase_ : List[Any] = auxiliary_concat_input
UpperCAmelCase_ : int = loss_ignore_index
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.backbone_config.to_dict()
UpperCAmelCase_ : Dict = self.__class__.model_type
return output
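# Added note (illustrative; the mangled names stand in for the UperNet config
# class): the override above flattens the nested backbone config before
# serialization, so a round-trip yields plain dicts, e.g.
#     cfg.to_dict()["backbone_config"]["model_type"] -> "resnet" (the default)
#     cfg.to_dict()["model_type"]                    -> "upernet"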
| 721
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    """
    Return the two's complement of a non-positive integer as a binary string.

    Doctests added for illustration (values follow from the algorithm below):
    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError('''input must be a negative integer''')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __a (_lowercase ):
__a : int = ['''image_processor''', '''tokenizer''']
__a : Optional[int] = '''BlipImageProcessor'''
__a : Any = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : str , __magic_name__ : str , __magic_name__ : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = False
super().__init__(A_ , A_ )
UpperCAmelCase_ : Optional[int] = self.image_processor
def __call__( self : Dict , __magic_name__ : ImageInput = None , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : Tuple , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(
text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
return text_encoding
# add pixel_values
UpperCAmelCase_ : List[Any] = self.image_processor(A_ , return_tensors=A_ )
if text is not None:
UpperCAmelCase_ : List[Any] = self.tokenizer(
text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
else:
UpperCAmelCase_ : Tuple = None
if text_encoding is not None:
encoding_image_processor.update(A_ )
return encoding_image_processor
def UpperCAmelCase__ ( self : int , *__magic_name__ : Any , **__magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*A_ , **A_ )
def UpperCAmelCase__ ( self : List[str] , *__magic_name__ : str , **__magic_name__ : Any ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*A_ , **A_ )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.tokenizer.model_input_names
UpperCAmelCase_ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
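# Added usage sketch (the exact call is illustrative, not from the original):
#     inputs = processor(images=pil_image, text="a photo of a cat", return_tensors="pt")
# returns pixel_values from the image processor plus input_ids/attention_mask
# from the tokenizer, merged by the `update` call in __call__ above.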
| 700
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(__magic_name__ ) # fails here
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 )
UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 )
UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
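# Added summary (inferred from the assertions above): dc.update(token) returns
# a (stepped, completed, reset) triple -- stepped when the token extends one of
# the candidate sequences, completed once any full sequence has been matched,
# and reset when the token forces the constraint back to its initial state.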
| 644
| 0
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
snake_case_ : Optional[int] = logging.get_logger(__name__)
enable_full_determinism()
class __a (lowercase__ , lowercase__ , unittest.TestCase ):
__a : Union[str, Any] = UNetaDModel
__a : str = 'sample'
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = 4
UpperCAmelCase_ : Optional[int] = 3
UpperCAmelCase_ : Union[str, Any] = (32, 32)
UpperCAmelCase_ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = torch.tensor([10] ).to(__magic_name__ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return (3, 32, 32)
def UpperCAmelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
UpperCAmelCase_ : List[Any] = self.dummy_input
return init_dict, inputs_dict
class __a (lowercase__ , lowercase__ , unittest.TestCase ):
__a : str = UNetaDModel
__a : Tuple = 'sample'
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = 4
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ )
UpperCAmelCase_ : int = torch.tensor([10] ).to(__magic_name__ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return (4, 32, 32)
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
return (4, 32, 32)
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
UpperCAmelCase_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__magic_name__ )
UpperCAmelCase_ : int = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=__magic_name__ )
model.to(__magic_name__ )
UpperCAmelCase_ : List[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
        # by default, model loading uses accelerate, since `low_cpu_mem_usage=True`
UpperCAmelCase_ , UpperCAmelCase_ : str = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=__magic_name__ )
model_accelerate.to(__magic_name__ )
model_accelerate.eval()
UpperCAmelCase_ : str = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase_ : str = noise.to(__magic_name__ )
UpperCAmelCase_ : Tuple = torch.tensor([10] * noise.shape[0] ).to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = model_accelerate(__magic_name__ , __magic_name__ )['''sample''']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=__magic_name__ , low_cpu_mem_usage=__magic_name__ )
model_normal_load.to(__magic_name__ )
model_normal_load.eval()
UpperCAmelCase_ : List[Any] = model_normal_load(__magic_name__ , __magic_name__ )['''sample''']
assert torch_all_close(__magic_name__ , __magic_name__ , rtol=1E-3 )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(__magic_name__ )
UpperCAmelCase_ : str = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase_ : List[Any] = noise.to(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(__magic_name__ )
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__magic_name__ , __magic_name__ ).sample
UpperCAmelCase_ : Dict = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase_ : str = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1E-3 ) )
class __a (lowercase__ , lowercase__ , unittest.TestCase ):
__a : Optional[int] = UNetaDModel
__a : str = 'sample'
@property
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any]=(32, 32) ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = 4
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__magic_name__ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
return (3, 32, 32)
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
UpperCAmelCase_ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def UpperCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = self.dummy_input
UpperCAmelCase_ : str = floats_tensor((4, 3) + (2_56, 2_56) ).to(__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = noise
UpperCAmelCase_ : List[str] = model(**__magic_name__ )
assert image is not None, "Make sure output is not None"
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(__magic_name__ )
UpperCAmelCase_ : Any = 4
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : int = (2_56, 2_56)
UpperCAmelCase_ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(__magic_name__ )
UpperCAmelCase_ : Any = torch.tensor(batch_size * [1E-4] ).to(__magic_name__ )
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(__magic_name__ , __magic_name__ ).sample
UpperCAmelCase_ : Dict = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase_ : int = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1E-2 ) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = 4
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(__magic_name__ )
UpperCAmelCase_ : List[str] = torch.tensor(batch_size * [1E-4] ).to(__magic_name__ )
with torch.no_grad():
UpperCAmelCase_ : Any = model(__magic_name__ , __magic_name__ ).sample
UpperCAmelCase_ : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase_ : int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1E-2 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
# not required for this model
pass
| 701
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
snake_case_ : Optional[Any] = df.shape[0]
# If you're using some other dataset, set the target column here
snake_case_ : Any = df.iloc[:, 1:2]
snake_case_ : str = actual_data.values.reshape(len_data, 1)
snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
snake_case_ : List[str] = 10
snake_case_ : Any = 5
snake_case_ : Any = 20
snake_case_ : Tuple = len_data - periods * look_back
snake_case_ : str = actual_data[:division]
snake_case_ : Optional[int] = actual_data[division - look_back :]
snake_case_ ,snake_case_ : Any = [], []
snake_case_ ,snake_case_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case_ : Any = np.array(train_x)
snake_case_ : Optional[Any] = np.array(test_x)
snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])
snake_case_ : List[Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64))  # input shape is inferred from the previous layer
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case_ : Dict = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
snake_case_ : Optional[Any] = model.predict(x_test)
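# A follow-up sketch, not part of the original script: the MinMaxScaler above is
# discarded right after fit_transform, so mapping predictions back to original
# units requires keeping a reference to it first. `scaler` is an assumed name:
#   scaler = MinMaxScaler()
#   actual_data = scaler.fit_transform(actual_data)
#   ...
#   predictions = scaler.inverse_transform(model.predict(x_test).reshape(-1, 1))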
| 644
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Optional[Any] = StableDiffusionSAGPipeline
__a : Optional[Any] = TEXT_TO_IMAGE_PARAMS
__a : int = TEXT_TO_IMAGE_BATCH_PARAMS
__a : str = TEXT_TO_IMAGE_IMAGE_PARAMS
__a : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__a : Dict = False
def UpperCAmelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase_ : Tuple = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCAmelCase_ : Tuple = CLIPTextModel(__magic_name__ )
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : str=0 ) -> Tuple:
"""simple docstring"""
if str(__magic_name__ ).startswith('''mps''' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(__magic_name__ )
else:
UpperCAmelCase_ : int = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
UpperCAmelCase_ : Optional[int] = sag_pipe.to(__magic_name__ )
sag_pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sag_pipe(
[prompt] , generator=__magic_name__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase_ : Any = sag_pipe.to(__magic_name__ )
sag_pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : str = sag_pipe(
[prompt] , generator=__magic_name__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase_ : Tuple = sag_pipe.to(__magic_name__ )
sag_pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = '''.'''
UpperCAmelCase_ : str = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = sag_pipe(
[prompt] , width=7_68 , height=5_12 , generator=__magic_name__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
UpperCAmelCase_ : Optional[int] = output.images
assert image.shape == (1, 5_12, 7_68, 3)
| 702
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
snake_case_ : Dict = "CompVis/stable-diffusion-v1-2"
snake_case_ : Any = "CompVis/stable-diffusion-v1-3"
snake_case_ : str = "CompVis/stable-diffusion-v1-4"
class __a (lowerCamelCase ):
def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__magic_name__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ : int = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
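# A hypothetical usage sketch; the custom-pipeline name below is an assumption
# based on the diffusers community-pipeline convention, not taken from this file:
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   out = pipe(prompt="a photo of an astronaut", num_inference_steps=50)
#   # out.images holds one image per checkpoint, v1.1 through v1.4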
| 644
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __a :
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : List[Any]=13 , __magic_name__ : str=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Optional[Any]=1 , __magic_name__ : int=32 , __magic_name__ : Optional[Any]=4 , __magic_name__ : Optional[Any]=8 , __magic_name__ : List[str]=37 , __magic_name__ : Optional[int]="gelu_new" , __magic_name__ : str=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : List[Any]=5_12 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[Any]=0.0_2 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=4 , __magic_name__ : int=None , __magic_name__ : Tuple=False , ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : Tuple = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Union[str, Any] = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = block_sizes
UpperCAmelCase_ : Optional[Any] = num_decoder_layers
UpperCAmelCase_ : Dict = d_model
UpperCAmelCase_ : Dict = n_head
UpperCAmelCase_ : Union[str, Any] = d_head
UpperCAmelCase_ : Dict = d_inner
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : int = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : List[Any] = activation_dropout
UpperCAmelCase_ : int = max_position_embeddings
UpperCAmelCase_ : Any = type_vocab_size
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : Dict = num_choices
UpperCAmelCase_ : Optional[int] = scope
UpperCAmelCase_ : int = initializer_std
# Used in the tests to check the size of the first attention layer
UpperCAmelCase_ : Optional[Any] = n_head
# Used in the tests to check the size of the first hidden state
UpperCAmelCase_ : Union[str, Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
UpperCAmelCase_ : Optional[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
UpperCAmelCase_ : int = self.num_hidden_layers + 2
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Optional[int] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : int , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = TFFunnelModel(config=__A )
UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_ : Tuple = model(__A )
UpperCAmelCase_ : List[str] = [input_ids, input_mask]
UpperCAmelCase_ : str = model(__A )
UpperCAmelCase_ : Optional[int] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : Optional[int] = TFFunnelModel(config=__A )
UpperCAmelCase_ : Dict = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Any = TFFunnelModel(config=__A )
UpperCAmelCase_ : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : List[Any] , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = TFFunnelBaseModel(config=__A )
UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_ : Optional[int] = model(__A )
UpperCAmelCase_ : Optional[Any] = [input_ids, input_mask]
UpperCAmelCase_ : List[str] = model(__A )
UpperCAmelCase_ : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Dict = TFFunnelBaseModel(config=__A )
UpperCAmelCase_ : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Union[str, Any] = TFFunnelBaseModel(config=__A )
UpperCAmelCase_ : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = TFFunnelForPreTraining(config=__A )
UpperCAmelCase_ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_ : str = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = TFFunnelForMaskedLM(config=__A )
UpperCAmelCase_ : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_ : Any = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Tuple , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : str = TFFunnelForSequenceClassification(config=__A )
UpperCAmelCase_ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_ : Optional[Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.num_choices
UpperCAmelCase_ : Union[str, Any] = TFFunnelForMultipleChoice(config=__A )
UpperCAmelCase_ : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : str = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : Optional[int] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCAmelCase_ : Optional[int] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str] , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.num_labels
UpperCAmelCase_ : str = TFFunnelForTokenClassification(config=__A )
UpperCAmelCase_ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_ : Any = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : int = TFFunnelForQuestionAnswering(config=__A )
UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase_ : int = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
(
UpperCAmelCase_
) : Tuple = config_and_inputs
UpperCAmelCase_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a : Optional[Any] = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__a : Union[str, Any] = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__a : Optional[Any] = False
__a : Optional[int] = False
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Dict = TFFunnelModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__A )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
@require_tf
class __a (UpperCamelCase_ , unittest.TestCase ):
__a : List[str] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__a : int = False
__a : List[str] = False
def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = TFFunnelModelTester(self , base=__A )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=__A )
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__A )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def UpperCAmelCase__ ( self : Any ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
| 703
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
snake_case_ : Optional[int] = 16
snake_case_ : Tuple = 32
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Tuple = datasets.map(
SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE__ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : str = DataLoader(
tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
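# For illustration (shapes assumed, not asserted by this script): each batch the
# dataloaders yield is a dict of padded tensors, roughly
#   {"input_ids": LongTensor[B, L], "attention_mask": LongTensor[B, L],
#    "token_type_ids": LongTensor[B, L], "labels": LongTensor[B]}
# where L is 128 on TPU and the longest sequence in the batch otherwise.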
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any:
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
# Initialize accelerator
UpperCAmelCase_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : int = config['''lr''']
UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] )
UpperCAmelCase_ : Optional[int] = int(config['''seed'''] )
UpperCAmelCase_ : List[str] = int(config['''batch_size'''] )
UpperCAmelCase_ : Optional[int] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ )
# Instantiate optimizer
UpperCAmelCase_ : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, )
else:
UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Union[str, Any] = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' )
UpperCAmelCase_ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCAmelCase_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1
UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint's scheduler's lr:''', lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizer's lr:''', optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : int = {}
for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = outputs.loss
UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : Tuple = F"""epoch_{epoch}"""
UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = accuracy
UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr''']
UpperCAmelCase_ : Tuple = epoch
UpperCAmelCase_ : Dict = overall_step
accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( ) -> List[str]:
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, )
parser.add_argument(
'''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', )
parser.add_argument(
'''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', )
parser.add_argument(
'''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', )
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
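# A hypothetical invocation; the launcher flags and config file name are
# illustrative, only the script's own arguments come from the parser above:
#   accelerate launch --config_file ds_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./ckpts
#   accelerate launch --config_file ds_config.yaml this_script.py \
#       --resume_from_checkpoint ./ckpts/epoch_0   # replays the resume checks above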
| 644
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
snake_case_ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __a (lowercase__ ):
def __init__( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(unet=__magic_name__ , scheduler=__magic_name__ )
@torch.no_grad()
def __call__( self : str , __magic_name__ : int = 1 , __magic_name__ : int = 1_00 , __magic_name__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __magic_name__ : Optional[float] = None , __magic_name__ : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if audio_length_in_s is None:
UpperCAmelCase_ : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase_ : Tuple = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase_ : List[Any] = 2 ** len(self.unet.up_blocks )
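# each up block is assumed to upsample by a factor of 2, so generated sample
# lengths must be long enough for, and divisible by, 2 ** len(up_blocks)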
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase_ : str = int(__magic_name__ )
if sample_size % down_scale_factor != 0:
UpperCAmelCase_ : List[Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase_ : Tuple = int(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase_ : str = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__magic_name__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase_ : Any = randn_tensor(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
# set step values
self.scheduler.set_timesteps(__magic_name__ , device=audio.device )
UpperCAmelCase_ : Optional[int] = self.scheduler.timesteps.to(__magic_name__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase_ : Any = self.unet(__magic_name__ , __magic_name__ ).sample
# 2. compute the previous sample: x_t -> x_t-1
UpperCAmelCase_ : Any = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
UpperCAmelCase_ : List[str] = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase_ : List[str] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__magic_name__ )
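# A minimal usage sketch; the model id is illustrative, not taken from this file:
#   pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]
#   # `audio` is a float waveform clamped to [-1, 1], shape (channels, samples)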
| 704
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]:
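"""
Return every permutation of nums using the rotate-and-recurse approach below.

Doctest sketch, assuming the function is exposed under its original name
`permute` (the obfuscated name here differs):
>>> sorted(permute([1, 2]))
[[1, 2], [2, 1]]
"""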
UpperCAmelCase_ : int = []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [nums.copy()]
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : List[Any] = nums.pop(0 )
UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ )
for perm in permutations:
perm.append(SCREAMING_SNAKE_CASE__ )
result.extend(SCREAMING_SNAKE_CASE__ )
nums.append(SCREAMING_SNAKE_CASE__ )
return result
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
output.append(nums[:] )
else:
for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start]
backtrack(start + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack
UpperCAmelCase_ : Optional[int] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data returned by the permute2 function
snake_case_ : Tuple = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 644
| 0
|
'''simple docstring'''
import math
snake_case_ : Union[str, Any] = 10
snake_case_ : List[str] = 7
snake_case_ : Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 20 ) -> List[str]:
UpperCAmelCase_ : Dict = math.comb(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = math.comb(NUM_BALLS - BALLS_PER_COLOUR, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Dict = NUM_COLOURS * (1 - missing_colour / total)
return F"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
| 705
|
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
right -= 1 # Because of right is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
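# A usage sketch; `MaxFenwickTree`, `update`, and `query` are the assumed
# original names, since the names above are obfuscated. Ranges are half-open:
#   tree = MaxFenwickTree(5)
#   tree.update(2, 7)
#   tree.update(4, 3)
#   tree.query(0, 5)   # -> 7, the maximum over indices [0, 5)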
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class __a (__a ):
__a : Optional[int] = "luke"
def __init__( self : Dict , __magic_name__ : str=5_02_67 , __magic_name__ : Tuple=50_00_00 , __magic_name__ : Tuple=7_68 , __magic_name__ : Optional[Any]=2_56 , __magic_name__ : Optional[Any]=12 , __magic_name__ : Tuple=12 , __magic_name__ : Union[str, Any]=30_72 , __magic_name__ : Any="gelu" , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Union[str, Any]=0.0_2 , __magic_name__ : Union[str, Any]=1E-12 , __magic_name__ : Any=True , __magic_name__ : Any=None , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=0 , __magic_name__ : Union[str, Any]=2 , **__magic_name__ : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Any = entity_vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Union[str, Any] = entity_emb_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : int = max_position_embeddings
UpperCAmelCase_ : List[Any] = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Dict = use_entity_aware_attention
UpperCAmelCase_ : List[Any] = classifier_dropout
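# A minimal instantiation sketch, assuming the class is exposed as LukeConfig
# (per the "luke" model_type above); all other arguments fall back to defaults:
#   config = LukeConfig(vocab_size=50267, entity_vocab_size=500000)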
| 706
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Tuple = scope
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : str = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# create attention mask
UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
UpperCAmelCase_ : Any = self.seq_length // 2
UpperCAmelCase_ : Tuple = 0
# first forward pass
UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple()
# create hypothetical next token and extend to next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase_ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , )
# get two different outputs
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attn_mask
UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ )
model.to(__magic_name__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = BioGptModel(__magic_name__ )
UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : int = config_and_inputs
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__a : Union[str, Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : List[str] = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = BioGptModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : str = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : Tuple = '''left'''
# Define PAD Token = EOS Token
UpperCAmelCase_ : List[Any] = tokenizer.eos_token
UpperCAmelCase_ : List[Any] = model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase_ : Tuple = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ )
UpperCAmelCase_ : Any = model.generate(
input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , )
UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ )
UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings )
UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : Tuple = input_dict['''input_ids''']
UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : Optional[int] = '''multi_label_classification'''
UpperCAmelCase_ : int = input_dict['''input_ids''']
UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
UpperCAmelCase_ : str = model(__magic_name__ )[0]
UpperCAmelCase_ : Optional[int] = 4_23_84
UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __magic_name__ )
UpperCAmelCase_ : List[Any] = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = model.generate(
**__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , )
UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__magic_name__ , __magic_name__ )
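# A minimal sketch of the cache-equivalence property that the past_key_values
# tests above exercise, using a tiny randomly initialised model so no download
# is needed. The config values here are illustrative, not the checkpoint's.
import torch
from transformers import BioGptConfig, BioGptModel
config = BioGptConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = BioGptModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (1, 8))
next_ids = torch.randint(0, config.vocab_size, (1, 3))
with torch.no_grad():
    # Cache the first 8 positions, then feed only the 3 new tokens.
    past = model(input_ids, use_cache=True).past_key_values
    incremental = model(next_ids, past_key_values=past).last_hidden_state
    # Reference: one full forward pass over all 11 positions.
    full = model(torch.cat([input_ids, next_ids], dim=-1)).last_hidden_state
# The last three positions must match up to numerical tolerance.
assert torch.allclose(full[:, -3:], incremental, atol=1e-3)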
| 644
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : List[str] = StableDiffusionXLImgaImgPipeline
__a : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__a : List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
__a : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__a : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__magic_name__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
UpperCAmelCase_ : List[Any] = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=32 , )
UpperCAmelCase_ : int = CLIPTextModel(__magic_name__ )
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = CLIPTextModelWithProjection(__magic_name__ )
UpperCAmelCase_ : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__magic_name__ )
UpperCAmelCase_ : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : List[str]=0 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
UpperCAmelCase_ : int = image / 2 + 0.5
if str(__magic_name__ ).startswith('''mps''' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(__magic_name__ )
else:
UpperCAmelCase_ : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCAmelCase_ : str = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = StableDiffusionXLImgaImgPipeline(**__magic_name__ )
UpperCAmelCase_ : List[Any] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : int = self.get_dummy_inputs(__magic_name__ )
UpperCAmelCase_ : int = sd_pipe(**__magic_name__ ).images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : str = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : str = StableDiffusionXLImgaImgPipeline(**__magic_name__ )
UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__magic_name__ )
UpperCAmelCase_ : List[str] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
# forward without prompt embeds
UpperCAmelCase_ : Any = self.get_dummy_inputs(__magic_name__ )
UpperCAmelCase_ : str = 3 * ['''this is a negative prompt''']
UpperCAmelCase_ : Optional[Any] = negative_prompt
UpperCAmelCase_ : str = 3 * [inputs['''prompt''']]
UpperCAmelCase_ : Any = sd_pipe(**__magic_name__ )
UpperCAmelCase_ : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
UpperCAmelCase_ : Any = self.get_dummy_inputs(__magic_name__ )
UpperCAmelCase_ : str = 3 * ['''this is a negative prompt''']
UpperCAmelCase_ : int = 3 * [inputs.pop('''prompt''' )]
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : int = sd_pipe.encode_prompt(__magic_name__ , negative_prompt=__magic_name__ )
UpperCAmelCase_ : Any = sd_pipe(
**__magic_name__ , prompt_embeds=__magic_name__ , negative_prompt_embeds=__magic_name__ , pooled_prompt_embeds=__magic_name__ , negative_pooled_prompt_embeds=__magic_name__ , )
UpperCAmelCase_ : List[Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : str , __magic_name__ : List[Any] , __magic_name__ : int="cpu" , __magic_name__ : Tuple=torch.floataa , __magic_name__ : int=0 ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCAmelCase_ : Tuple = np.random.RandomState(__magic_name__ ).standard_normal((1, 4, 64, 64) )
UpperCAmelCase_ : Optional[int] = torch.from_numpy(__magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ )
UpperCAmelCase_ : int = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Optional[int] = self.get_inputs(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = pipe(**__magic_name__ ).images
UpperCAmelCase_ : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : str = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
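# The get_dummy_inputs/get_inputs helpers above both branch on the device when
# seeding. A hypothetical standalone version of that pattern (the function
# name is illustrative): torch.Generator cannot be created on the "mps"
# backend, so MPS falls back to seeding the global CPU generator.
import torch
def make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)
# Same seed, same draws: the basis for reproducible pipeline tests.
a = torch.randn(3, generator=make_generator("cpu", seed=0))
b = torch.randn(3, generator=make_generator("cpu", seed=0))
assert torch.equal(a, b)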
| 707
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __a (lowerCamelCase , unittest.TestCase ):
__a : List[str] = BlenderbotSmallTokenizer
__a : List[Any] = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = '''adapt act apte'''
UpperCAmelCase_ : Tuple = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : List[Any] = '''adapt act apte'''
UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
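# A self-contained replay of the fixture built in setUp above, assuming the
# same toy vocab/merges: write them to a temp dir and confirm that "apte" is
# split into the subwords "ap@@" + "te" by the merge rules.
import json
import os
import tempfile
from transformers import BlenderbotSmallTokenizer
with tempfile.TemporaryDirectory() as tmp:
    tokens = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
    merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d",
              "ad apt</w>", "a c", "ac t</w>", ""]
    vocab_file = os.path.join(tmp, "vocab.json")
    merges_file = os.path.join(tmp, "merges.txt")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        json.dump({t: i for i, t in enumerate(tokens)}, fp)
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))
    tok = BlenderbotSmallTokenizer(
        vocab_file, merges_file,
        unk_token="__unk__", bos_token="__start__", eos_token="__end__",
    )
    assert tok.tokenize("adapt act apte") == ["adapt", "act", "ap@@", "te"]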
| 644
| 0
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
snake_case_ : Optional[Any] = TypeVar("_T")
class __a (Generic[_T] ):
def __init__( self : str , __magic_name__ : Iterable[_T] | None = None ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : list[_T] = list(iterable or [] )
UpperCAmelCase_ : list[_T] = []
def __len__( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return F"""Queue({tuple(self._stacka[::-1] + self._stacka )})"""
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : _T ) -> Optional[int]:
"""simple docstring"""
self._stacka.append(__magic_name__ )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self._stacka.pop
UpperCAmelCase_ : Tuple = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('''Queue is empty''' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
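# The class above collapses what were originally two distinct stack
# attributes into a single name; a de-obfuscated reference version of the
# same two-stack queue is sketched below. get() is amortised O(1) because
# each element crosses from the inbox stack to the outbox stack at most once.
from collections.abc import Iterable
from typing import Generic, TypeVar
T = TypeVar("T")
class QueueByTwoStacks(Generic[T]):
    def __init__(self, iterable: Iterable[T] | None = None) -> None:
        self._inbox: list[T] = list(iterable or [])  # newest items on top
        self._outbox: list[T] = []  # oldest items on top, in reversed order
    def __len__(self) -> int:
        return len(self._inbox) + len(self._outbox)
    def put(self, item: T) -> None:
        self._inbox.append(item)
    def get(self) -> T:
        if not self._outbox:
            # Drain the inbox so the oldest element ends up on top.
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()
q = QueueByTwoStacks([1, 2, 3])
q.put(4)
assert [q.get() for _ in range(4)] == [1, 2, 3, 4]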
| 708
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = get_activation('''swish''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_activation('''mish''' )
self.assertIsInstance(__magic_name__ , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_activation('''gelu''' )
self.assertIsInstance(__magic_name__ , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
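# A compact sanity check of the same helper the tests above cover: each
# supported name should resolve to an nn.Module, and all four activations
# map 0.0 exactly to 0.0.
import torch
from diffusers.models.activations import get_activation
for name in ("swish", "silu", "mish", "gelu"):
    act = get_activation(name)
    assert isinstance(act, torch.nn.Module)
    assert act(torch.tensor(0.0)).item() == 0.0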
| 644
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __a (metaclass=DummyObject ):
__a : Optional[Any] = ["""transformers""", """torch""", """note_seq"""]
def __init__( self : Tuple , *__magic_name__ : int , **__magic_name__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *__magic_name__ : Optional[Any] , **__magic_name__ : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *__magic_name__ : str , **__magic_name__ : List[str] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
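# A hypothetical, dependency-free reimplementation of the guard pattern the
# placeholder class above relies on: any attribute access on the dummy class
# raises an ImportError naming the missing backends, instead of an opaque
# failure later on. Class and method names here are illustrative only.
class _DummyMeta(type):
    _backends = ("transformers", "torch", "note_seq")
    def __getattr__(cls, name):
        raise ImportError(
            f"{cls.__name__} requires the backends: {', '.join(cls._backends)}"
        )
class MidiProcessor(metaclass=_DummyMeta):
    pass
try:
    MidiProcessor.from_pretrained("some/checkpoint")
except ImportError as err:
    print(err)  # MidiProcessor requires the backends: transformers, torch, note_seq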
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __a (lowerCamelCase ):
__a : Tuple = ["pixel_values"]
def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : Tuple = do_rescale
UpperCAmelCase_ : List[Any] = size_divisor
UpperCAmelCase_ : Any = resample
super().__init__(**__magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase_ : Dict = height // size_divisor * size_divisor
UpperCAmelCase_ : Dict = width // size_divisor * size_divisor
UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
return image
def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase_ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images]
if do_resize:
UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images]
UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
UpperCAmelCase_ : int = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
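# The resize step above rounds each spatial dimension down to the nearest
# multiple of size_divisor; the arithmetic in isolation (helper name is
# illustrative):
def round_down_to_multiple(height: int, width: int, size_divisor: int) -> tuple[int, int]:
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor
# e.g. a 300x500 image with size_divisor=32 becomes 288x480.
assert round_down_to_multiple(300, 500, 32) == (288, 480)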
| 644
| 0
|